From 89348a1c633483032d6844f34e89d2737f02abd6 Mon Sep 17 00:00:00 2001
From: Vincent Demeester
Date: Mon, 2 Jul 2018 15:29:10 +0200
Subject: [PATCH] Update `docker/cli` with upstream changes

This still depends on a fork, but with far fewer divergent commits (several
changes have already been upstreamed) and with an up-to-date `docker/cli`.

One big change to note is that `enabled` becomes `x-enabled` (using the
*extras* field of the compose file format), and thus does not require any
change to the compose file struct.

Signed-off-by: Vincent Demeester
---
 Gopkg.lock | 99 +- Gopkg.toml | 16 +- cmd/docker-app/deploy.go | 43 +- .../templates/stack-v1beta1.yaml | 2 +- .../helm-expected.chart/templates/stack.yaml | 2 +- internal/renderer/helm.go | 73 +- internal/renderer/render.go | 72 +- internal/renderer/render_test.go | 55 + internal/templateloader/loader.go | 4 +- vendor/github.com/beorn7/perks/LICENSE | 20 + .../beorn7/perks/quantile/stream.go | 316 + vendor/github.com/docker/cli/AUTHORS | 12 +- vendor/github.com/docker/cli/cli/cobra.go | 16 +- .../github.com/docker/cli/cli/command/cli.go | 17 +- .../cli/cli/command/formatter/disk_usage.go | 38 +- .../cli/cli/command/formatter/service.go | 39 +- .../docker/cli/cli/command/formatter/stack.go | 36 +- .../cli/cli/command/idresolver/idresolver.go | 2 +- .../docker/cli/cli/command/node/cmd.go | 2 +- .../docker/cli/cli/command/node/inspect.go | 2 +- .../docker/cli/cli/command/node/list.go | 2 +- .../docker/cli/cli/command/node/ps.go | 2 +- .../docker/cli/cli/command/node/remove.go | 3 +- .../docker/cli/cli/command/node/update.go | 2 +- .../docker/cli/cli/command/orchestrator.go | 68 +- .../docker/cli/cli/command/registry.go | 2 +- .../docker/cli/cli/command/service/create.go | 4 +- .../docker/cli/cli/command/service/helpers.go | 2 +- .../docker/cli/cli/command/service/inspect.go | 3 +- .../docker/cli/cli/command/service/list.go | 2 +- .../docker/cli/cli/command/service/logs.go | 3 +- .../docker/cli/cli/command/service/opts.go | 7 +- .../docker/cli/cli/command/service/parse.go | 3 +- .../cli/command/service/progress/progress.go | 2 +- .../docker/cli/cli/command/service/ps.go | 2 +- .../docker/cli/cli/command/service/remove.go | 2 +- .../docker/cli/cli/command/service/scale.go | 3 +- .../docker/cli/cli/command/service/trust.go | 2 +- .../docker/cli/cli/command/service/update.go | 116 +- .../docker/cli/cli/command/stack/cmd.go | 119 + .../docker/cli/cli/command/stack/common.go | 31 + .../docker/cli/cli/command/stack/deploy.go | 90 + .../cli/cli/command/stack/kubernetes/cli.go | 97 +- .../cli/command/stack/kubernetes/client.go | 36 +- .../command/stack/kubernetes/conversion.go | 133 +- .../cli/command/stack/kubernetes/convert.go | 98 +- .../cli/command/stack/kubernetes/deploy.go | 138 +- .../cli/cli/command/stack/kubernetes/list.go | 136 +- .../cli/cli/command/stack/kubernetes/ps.go | 92 +- .../cli/command/stack/kubernetes/remove.go | 13 +- .../cli/command/stack/kubernetes/services.go | 118 +- .../cli/cli/command/stack/kubernetes/stack.go | 35 +- .../command/stack/kubernetes/stackclient.go | 75 +- .../cli/command/stack/kubernetes/warnings.go | 145 + .../cli/command/stack/kubernetes/watcher.go | 285 +- .../docker/cli/cli/command/stack/list.go | 79 + .../cli/cli/command/stack/options/opts.go | 4 +- .../docker/cli/cli/command/stack/ps.go | 48 + .../docker/cli/cli/command/stack/remove.go | 43 + .../docker/cli/cli/command/stack/services.go | 46 + .../cli/cli/command/stack/swarm/common.go | 45 +- .../cli/cli/command/stack/swarm/deploy.go | 20 +-
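
As a rough sketch of how the renamed field reads in a compose file (the service names and the `${debug}` settings variable are invented for illustration, and a compose format version that accepts service-level extension fields, e.g. 3.7, is assumed):

    version: "3.7"
    services:
      web:
        image: nginx
      debug-tools:
        image: busybox
        command: ["sleep", "infinity"]
        # Dropped from the rendered stack when ${debug} resolves to "", "0" or "false";
        # a leading "!" in the value negates the check (see the internal/renderer/render.go changes below).
        x-enabled: "${debug}"
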
.../command/stack/swarm/deploy_bundlefile.go | 6 +- .../command/stack/swarm/deploy_composefile.go | 22 +- .../cli/cli/command/stack/swarm/list.go | 45 +- .../docker/cli/cli/command/stack/swarm/ps.go | 10 +- .../cli/cli/command/stack/swarm/remove.go | 7 +- .../cli/cli/command/stack/swarm/services.go | 2 +- .../docker/cli/cli/command/task/print.go | 2 +- .../docker/cli/cli/compose/convert/service.go | 8 +- .../compose/interpolation/interpolation.go | 15 +- .../cli/cli/compose/loader/interpolate.go | 10 +- .../docker/cli/cli/compose/loader/loader.go | 114 +- .../docker/cli/cli/compose/schema/bindata.go | 45 +- .../docker/cli/cli/compose/schema/schema.go | 2 +- .../cli/cli/compose/template/template.go | 136 +- .../docker/cli/cli/compose/types/types.go | 69 +- .../docker/cli/cli/config/configfile/file.go | 25 +- .../config/credentials/default_store_linux.go | 4 +- .../github.com/docker/cli/cli/flags/client.go | 1 - .../github.com/docker/cli/cli/flags/common.go | 15 +- .../docker/cli/cli/registry/client/client.go | 4 +- .../cli/cli/registry/client/endpoint.go | 2 +- .../docker/cli/cli/registry/client/fetcher.go | 2 +- .../github.com/docker/cli/cli/trust/trust.go | 2 +- .../client/clientset/scheme/register.go | 8 +- .../typed/compose/v1beta1/compose_client.go | 6 +- .../clientset/typed/compose/v1beta1/stack.go | 12 +- .../typed/compose/v1beta2/compose_client.go | 6 +- .../clientset/typed/compose/v1beta2/stack.go | 12 +- .../cli/kubernetes/compose/v1beta1/doc.go | 3 +- .../cli/kubernetes/compose/v1beta1/parsing.go | 4 + .../cli/kubernetes/compose/v1beta2/stack.go | 128 +- .../docker/cli/kubernetes/config.go | 14 +- vendor/github.com/docker/cli/opts/opts.go | 36 + vendor/github.com/docker/cli/opts/port.go | 4 + .../github.com/docker/distribution/blobs.go | 2 +- .../docker/distribution/context/context.go | 85 - .../docker/distribution/context/doc.go | 89 - .../docker/distribution/context/http.go | 366 - .../docker/distribution/context/logger.go | 116 - .../docker/distribution/context/trace.go | 104 - .../docker/distribution/context/util.go | 24 - .../docker/distribution/context/version.go | 16 - .../distribution/manifest/schema2/builder.go | 3 +- .../docker/distribution/manifests.go | 2 +- .../docker/distribution/metrics/prometheus.go | 13 + .../docker/distribution/registry.go | 18 +- .../registry/api/errcode/handler.go | 6 +- .../client/auth/challenge/authchallenge.go | 8 +- .../registry/client/auth/session.go | 33 +- .../registry/client/blob_writer.go | 2 +- .../registry/client/repository.go | 68 +- .../cache/cachedblobdescriptorstore.go | 40 +- .../registry/storage/cache/memory/memory.go | 2 +- vendor/github.com/docker/distribution/tags.go | 2 +- .../pass/pass_linux.go | 239 - vendor/github.com/docker/docker/AUTHORS | 36 +- vendor/github.com/docker/docker/api/common.go | 4 +- .../docker/docker/api/common_unix.go | 2 +- .../docker/docker/api/types/client.go | 18 +- .../docker/api/types/container/host_config.go | 6 + .../docker/docker/api/types/plugin.go | 3 + .../docker/docker/api/types/port.go | 2 +- .../docker/api/types/swarm/container.go | 1 + .../docker/docker/api/types/swarm/runtime.go | 8 + .../docker/docker/api/types/swarm/task.go | 11 +- .../docker/docker/api/types/time/timestamp.go | 11 +- .../docker/docker/api/types/types.go | 17 +- .../{volumes_create.go => volume_create.go} | 8 +- .../{volumes_list.go => volume_list.go} | 8 +- .../docker/docker/client/build_cancel.go | 21 + .../github.com/docker/docker/client/client.go | 5 + .../docker/docker/client/container_logs.go | 5 +- 
.../docker/docker/client/container_stop.go | 9 +- .../github.com/docker/docker/client/errors.go | 1 - .../github.com/docker/docker/client/hijack.go | 105 +- .../docker/docker/client/image_build.go | 4 + .../docker/docker/client/interface.go | 7 +- .../docker/docker/client/node_remove.go | 3 +- .../docker/docker/client/service_create.go | 4 +- .../docker/docker/client/service_logs.go | 3 +- .../docker/docker/client/tlsconfig_clone.go | 11 - .../docker/client/tlsconfig_clone_go17.go | 33 - .../docker/docker/client/volume_create.go | 2 +- .../docker/docker/client/volume_list.go | 4 +- .../docker-engine-selinux/LICENSE | 339 - .../docker-engine-selinux/LICENSE | 340 - vendor/github.com/docker/docker/errdefs/is.go | 2 +- .../docker/docker/pkg/archive/archive.go | 1 + .../docker/pkg/archive/archive_windows.go | 2 +- .../docker/docker/pkg/archive/diff.go | 4 +- .../docker/docker/pkg/archive/time_linux.go | 2 +- .../docker/docker/pkg/fileutils/fileutils.go | 2 +- .../docker/docker/pkg/idtools/idtools.go | 4 +- .../docker/pkg/jsonmessage/jsonmessage.go | 10 +- .../docker/docker/pkg/stdcopy/stdcopy.go | 2 +- .../docker/docker/pkg/term/proxy.go | 4 + .../docker/docker/pkg/term/windows/windows.go | 2 +- .../docker/docker/registry/endpoint_v1.go | 2 +- .../docker/docker/registry/session.go | 2 +- .../github.com/docker/go-metrics/LICENSE.code | 191 + .../github.com/docker/go-metrics/LICENSE.docs | 425 ++ vendor/github.com/docker/go-metrics/NOTICE | 16 + .../github.com/docker/go-metrics/counter.go | 52 + vendor/github.com/docker/go-metrics/docs.go | 3 + vendor/github.com/docker/go-metrics/gauge.go | 72 + .../github.com/docker/go-metrics/handler.go | 13 + .../github.com/docker/go-metrics/helpers.go | 10 + .../github.com/docker/go-metrics/namespace.go | 181 + .../github.com/docker/go-metrics/register.go | 15 + vendor/github.com/docker/go-metrics/timer.go | 68 + vendor/github.com/docker/go-metrics/unit.go | 12 + vendor/github.com/docker/swarmkit/LICENSE | 2 +- .../github.com/docker/swarmkit/api/ca.pb.go | 53 +- .../docker/swarmkit/api/control.pb.go | 1101 ++- .../docker/swarmkit/api/deepcopy/copy.go | 3 + .../docker/swarmkit/api/dispatcher.pb.go | 75 +- .../docker/swarmkit/api/health.pb.go | 37 +- .../docker/swarmkit/api/logbroker.pb.go | 67 +- .../docker/swarmkit/api/objects.pb.go | 705 +- .../github.com/docker/swarmkit/api/raft.pb.go | 73 +- .../docker/swarmkit/api/resource.pb.go | 41 +- .../docker/swarmkit/api/snapshot.pb.go | 47 +- .../docker/swarmkit/api/specs.pb.go | 253 +- .../docker/swarmkit/api/types.pb.go | 1797 ++--- .../docker/swarmkit/api/watch.pb.go | 77 +- .../swarmkit/protobuf/plugin/plugin.pb.go | 31 +- vendor/github.com/hashicorp/golang-lru/2q.go | 223 + .../github.com/hashicorp/golang-lru/LICENSE | 362 + vendor/github.com/hashicorp/golang-lru/arc.go | 257 + vendor/github.com/hashicorp/golang-lru/doc.go | 21 + vendor/github.com/hashicorp/golang-lru/lru.go | 110 + .../hashicorp/golang-lru/simplelru/lru.go | 161 + .../golang-lru/simplelru/lru_interface.go | 37 + .../golang_protobuf_extensions/LICENSE | 201 + .../golang_protobuf_extensions/NOTICE | 1 + .../pbutil/decode.go | 75 + .../golang_protobuf_extensions/pbutil/doc.go | 16 + .../pbutil/encode.go | 46 + vendor/github.com/morikuni/aec/LICENSE | 21 + vendor/github.com/morikuni/aec/aec.go | 137 + vendor/github.com/morikuni/aec/ansi.go | 59 + vendor/github.com/morikuni/aec/builder.go | 388 + vendor/github.com/morikuni/aec/sgr.go | 202 + .../prometheus/client_golang/AUTHORS.md | 18 + .../prometheus/client_golang/LICENSE | 201 + 
.../prometheus/client_golang/NOTICE | 23 + .../client_golang/prometheus/collector.go | 75 + .../client_golang/prometheus/counter.go | 172 + .../client_golang/prometheus/desc.go | 205 + .../client_golang/prometheus/doc.go | 181 + .../prometheus/expvar_collector.go | 119 + .../client_golang/prometheus/fnv.go | 29 + .../client_golang/prometheus/gauge.go | 140 + .../client_golang/prometheus/go_collector.go | 263 + .../client_golang/prometheus/histogram.go | 444 ++ .../client_golang/prometheus/http.go | 490 ++ .../client_golang/prometheus/metric.go | 166 + .../prometheus/process_collector.go | 142 + .../client_golang/prometheus/registry.go | 806 +++ .../client_golang/prometheus/summary.go | 534 ++ .../client_golang/prometheus/untyped.go | 138 + .../client_golang/prometheus/value.go | 234 + .../client_golang/prometheus/vec.go | 404 ++ .../prometheus/client_model/LICENSE | 201 + .../github.com/prometheus/client_model/NOTICE | 5 + .../prometheus/client_model/go/metrics.pb.go | 364 + .../prometheus/client_model/ruby/LICENSE | 201 + vendor/github.com/prometheus/common/LICENSE | 201 + vendor/github.com/prometheus/common/NOTICE | 5 + .../prometheus/common/expfmt/decode.go | 429 ++ .../prometheus/common/expfmt/encode.go | 88 + .../prometheus/common/expfmt/expfmt.go | 38 + .../prometheus/common/expfmt/fuzz.go | 36 + .../prometheus/common/expfmt/text_create.go | 303 + .../prometheus/common/expfmt/text_parse.go | 757 ++ .../bitbucket.org/ww/goautoneg/autoneg.go | 162 + .../prometheus/common/model/alert.go | 136 + .../prometheus/common/model/fingerprinting.go | 105 + .../github.com/prometheus/common/model/fnv.go | 42 + .../prometheus/common/model/labels.go | 210 + .../prometheus/common/model/labelset.go | 169 + .../prometheus/common/model/metric.go | 103 + .../prometheus/common/model/model.go | 16 + .../prometheus/common/model/signature.go | 144 + .../prometheus/common/model/silence.go | 106 + .../prometheus/common/model/time.go | 264 + .../prometheus/common/model/value.go | 416 ++ vendor/github.com/prometheus/procfs/LICENSE | 201 + vendor/github.com/prometheus/procfs/NOTICE | 7 + .../github.com/prometheus/procfs/buddyinfo.go | 95 + vendor/github.com/prometheus/procfs/doc.go | 45 + vendor/github.com/prometheus/procfs/fs.go | 82 + .../prometheus/procfs/internal/util/parse.go | 46 + vendor/github.com/prometheus/procfs/ipvs.go | 259 + vendor/github.com/prometheus/procfs/mdstat.go | 151 + .../prometheus/procfs/mountstats.go | 569 ++ .../github.com/prometheus/procfs/net_dev.go | 216 + .../github.com/prometheus/procfs/nfs/nfs.go | 263 + .../github.com/prometheus/procfs/nfs/parse.go | 317 + .../prometheus/procfs/nfs/parse_nfs.go | 67 + .../prometheus/procfs/nfs/parse_nfsd.go | 89 + vendor/github.com/prometheus/procfs/proc.go | 238 + .../github.com/prometheus/procfs/proc_io.go | 65 + .../prometheus/procfs/proc_limits.go | 150 + .../github.com/prometheus/procfs/proc_ns.go | 68 + .../github.com/prometheus/procfs/proc_stat.go | 188 + vendor/github.com/prometheus/procfs/stat.go | 232 + vendor/github.com/prometheus/procfs/xfrm.go | 187 + .../github.com/prometheus/procfs/xfs/parse.go | 330 + .../github.com/prometheus/procfs/xfs/xfs.go | 163 + .../pkg/apimachinery/announced/announced.go | 99 - .../apimachinery/announced/group_factory.go | 255 - .../apimachinery/pkg/apimachinery/doc.go | 20 - .../pkg/apimachinery/registered/registered.go | 336 - .../apimachinery/pkg/apimachinery/types.go | 87 - .../apimachinery/pkg/util/cache/cache.go | 83 + .../pkg/util/cache/lruexpirecache.go | 102 + 
.../client-go/tools/cache/controller.go | 394 + .../client-go/tools/cache/delta_fifo.go | 659 ++ .../pkg/api => client-go/tools/cache}/doc.go | 16 +- .../client-go/tools/cache/expiration_cache.go | 208 + .../tools/cache/expiration_cache_fakes.go | 54 + .../tools/cache/fake_custom_store.go | 102 + vendor/k8s.io/client-go/tools/cache/fifo.go | 358 + vendor/k8s.io/client-go/tools/cache/heap.go | 323 + vendor/k8s.io/client-go/tools/cache/index.go | 87 + .../k8s.io/client-go/tools/cache/listers.go | 160 + .../k8s.io/client-go/tools/cache/listwatch.go | 188 + .../client-go/tools/cache/mutation_cache.go | 261 + .../tools/cache/mutation_detector.go | 127 + .../k8s.io/client-go/tools/cache/reflector.go | 449 ++ .../tools/cache/reflector_metrics.go | 119 + .../client-go/tools/cache/shared_informer.go | 597 ++ vendor/k8s.io/client-go/tools/cache/store.go | 244 + .../tools/cache/thread_safe_store.go | 304 + .../client-go/tools/cache/undelta_store.go | 83 + vendor/k8s.io/client-go/tools/pager/pager.go | 118 + .../client-go/util/buffer/ring_growing.go | 72 + vendor/k8s.io/client-go/util/retry/util.go | 79 + .../pkg/api/annotation_key_constants.go | 94 - .../kubernetes/pkg/api/field_constants.go | 38 - vendor/k8s.io/kubernetes/pkg/api/json.go | 28 - .../kubernetes/pkg/api/objectreference.go | 34 - vendor/k8s.io/kubernetes/pkg/api/register.go | 124 - vendor/k8s.io/kubernetes/pkg/api/resource.go | 62 - vendor/k8s.io/kubernetes/pkg/api/taint.go | 36 - .../k8s.io/kubernetes/pkg/api/toleration.go | 30 - vendor/k8s.io/kubernetes/pkg/api/types.go | 4287 ----------- .../k8s.io/kubernetes/pkg/api/v1/pod/util.go | 296 + .../pkg/api/zz_generated.deepcopy.go | 6320 ----------------- 312 files changed, 27470 insertions(+), 17024 deletions(-) create mode 100644 internal/renderer/render_test.go create mode 100644 vendor/github.com/beorn7/perks/LICENSE create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go create mode 100644 vendor/github.com/docker/cli/cli/command/stack/cmd.go create mode 100644 vendor/github.com/docker/cli/cli/command/stack/common.go create mode 100644 vendor/github.com/docker/cli/cli/command/stack/deploy.go create mode 100644 vendor/github.com/docker/cli/cli/command/stack/kubernetes/warnings.go create mode 100644 vendor/github.com/docker/cli/cli/command/stack/list.go create mode 100644 vendor/github.com/docker/cli/cli/command/stack/ps.go create mode 100644 vendor/github.com/docker/cli/cli/command/stack/remove.go create mode 100644 vendor/github.com/docker/cli/cli/command/stack/services.go create mode 100644 vendor/github.com/docker/cli/kubernetes/compose/v1beta1/parsing.go delete mode 100644 vendor/github.com/docker/distribution/context/context.go delete mode 100644 vendor/github.com/docker/distribution/context/doc.go delete mode 100644 vendor/github.com/docker/distribution/context/http.go delete mode 100644 vendor/github.com/docker/distribution/context/logger.go delete mode 100644 vendor/github.com/docker/distribution/context/trace.go delete mode 100644 vendor/github.com/docker/distribution/context/util.go delete mode 100644 vendor/github.com/docker/distribution/context/version.go create mode 100644 vendor/github.com/docker/distribution/metrics/prometheus.go delete mode 100644 vendor/github.com/docker/docker-credential-helpers/pass/pass_linux.go rename vendor/github.com/docker/docker/api/types/volume/{volumes_create.go => volume_create.go} (81%) rename vendor/github.com/docker/docker/api/types/volume/{volumes_list.go => volume_list.go} (74%) create mode 100644 
vendor/github.com/docker/docker/client/build_cancel.go delete mode 100644 vendor/github.com/docker/docker/client/tlsconfig_clone.go delete mode 100644 vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go delete mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE delete mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE create mode 100644 vendor/github.com/docker/go-metrics/LICENSE.code create mode 100644 vendor/github.com/docker/go-metrics/LICENSE.docs create mode 100644 vendor/github.com/docker/go-metrics/NOTICE create mode 100644 vendor/github.com/docker/go-metrics/counter.go create mode 100644 vendor/github.com/docker/go-metrics/docs.go create mode 100644 vendor/github.com/docker/go-metrics/gauge.go create mode 100644 vendor/github.com/docker/go-metrics/handler.go create mode 100644 vendor/github.com/docker/go-metrics/helpers.go create mode 100644 vendor/github.com/docker/go-metrics/namespace.go create mode 100644 vendor/github.com/docker/go-metrics/register.go create mode 100644 vendor/github.com/docker/go-metrics/timer.go create mode 100644 vendor/github.com/docker/go-metrics/unit.go create mode 100644 vendor/github.com/hashicorp/golang-lru/2q.go create mode 100644 vendor/github.com/hashicorp/golang-lru/LICENSE create mode 100644 vendor/github.com/hashicorp/golang-lru/arc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/doc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go create mode 100644 vendor/github.com/morikuni/aec/LICENSE create mode 100644 vendor/github.com/morikuni/aec/aec.go create mode 100644 vendor/github.com/morikuni/aec/ansi.go create mode 100644 vendor/github.com/morikuni/aec/builder.go create mode 100644 vendor/github.com/morikuni/aec/sgr.go create mode 100644 vendor/github.com/prometheus/client_golang/AUTHORS.md create mode 100644 vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http.go create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 vendor/github.com/prometheus/client_model/LICENSE create mode 100644 vendor/github.com/prometheus/client_model/NOTICE create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 vendor/github.com/prometheus/client_model/ruby/LICENSE create mode 100644 vendor/github.com/prometheus/common/LICENSE create mode 100644 vendor/github.com/prometheus/common/NOTICE create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 vendor/github.com/prometheus/common/model/alert.go create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 vendor/github.com/prometheus/common/model/fnv.go create mode 100644 vendor/github.com/prometheus/common/model/labels.go create mode 100644 vendor/github.com/prometheus/common/model/labelset.go create mode 100644 vendor/github.com/prometheus/common/model/metric.go create mode 100644 vendor/github.com/prometheus/common/model/model.go create mode 100644 vendor/github.com/prometheus/common/model/signature.go create mode 100644 vendor/github.com/prometheus/common/model/silence.go create mode 100644 vendor/github.com/prometheus/common/model/time.go create mode 100644 vendor/github.com/prometheus/common/model/value.go create mode 100644 vendor/github.com/prometheus/procfs/LICENSE create mode 100644 vendor/github.com/prometheus/procfs/NOTICE create mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go create mode 100644 vendor/github.com/prometheus/procfs/doc.go create mode 100644 vendor/github.com/prometheus/procfs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 vendor/github.com/prometheus/procfs/mountstats.go create mode 100644 vendor/github.com/prometheus/procfs/net_dev.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/nfs.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfs.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go create mode 100644 vendor/github.com/prometheus/procfs/proc.go create mode 100644 vendor/github.com/prometheus/procfs/proc_io.go create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go create mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go create mode 100644 
vendor/github.com/prometheus/procfs/proc_stat.go create mode 100644 vendor/github.com/prometheus/procfs/stat.go create mode 100644 vendor/github.com/prometheus/procfs/xfrm.go create mode 100644 vendor/github.com/prometheus/procfs/xfs/parse.go create mode 100644 vendor/github.com/prometheus/procfs/xfs/xfs.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/cache.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go create mode 100644 vendor/k8s.io/client-go/tools/cache/controller.go create mode 100644 vendor/k8s.io/client-go/tools/cache/delta_fifo.go rename vendor/k8s.io/{kubernetes/pkg/api => client-go/tools/cache}/doc.go (53%) create mode 100644 vendor/k8s.io/client-go/tools/cache/expiration_cache.go create mode 100644 vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go create mode 100644 vendor/k8s.io/client-go/tools/cache/fake_custom_store.go create mode 100644 vendor/k8s.io/client-go/tools/cache/fifo.go create mode 100644 vendor/k8s.io/client-go/tools/cache/heap.go create mode 100644 vendor/k8s.io/client-go/tools/cache/index.go create mode 100644 vendor/k8s.io/client-go/tools/cache/listers.go create mode 100644 vendor/k8s.io/client-go/tools/cache/listwatch.go create mode 100644 vendor/k8s.io/client-go/tools/cache/mutation_cache.go create mode 100644 vendor/k8s.io/client-go/tools/cache/mutation_detector.go create mode 100644 vendor/k8s.io/client-go/tools/cache/reflector.go create mode 100644 vendor/k8s.io/client-go/tools/cache/reflector_metrics.go create mode 100644 vendor/k8s.io/client-go/tools/cache/shared_informer.go create mode 100755 vendor/k8s.io/client-go/tools/cache/store.go create mode 100644 vendor/k8s.io/client-go/tools/cache/thread_safe_store.go create mode 100644 vendor/k8s.io/client-go/tools/cache/undelta_store.go create mode 100644 vendor/k8s.io/client-go/tools/pager/pager.go create mode 100644 vendor/k8s.io/client-go/util/buffer/ring_growing.go create mode 100644 vendor/k8s.io/client-go/util/retry/util.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/field_constants.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/json.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/objectreference.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/resource.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/taint.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/toleration.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go diff --git a/Gopkg.lock b/Gopkg.lock index c6d54e762..baacd4442 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -34,6 +34,12 @@ packages = ["."] revision = "de5bf2ad457846296e2031421a34e2568e304e35" +[[projects]] + branch = "master" + name = "github.com/beorn7/perks" + packages = ["quantile"] + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + [[projects]] name = "github.com/cbroglie/mustache" 
packages = ["."] @@ -71,6 +77,7 @@ "cli/command/node", "cli/command/service", "cli/command/service/progress", + "cli/command/stack", "cli/command/stack/kubernetes", "cli/command/stack/loader", "cli/command/stack/options", @@ -103,18 +110,17 @@ "service/logs", "templates" ] - revision = "7d5383da2462b9feb013cc3be5e977ddaf51978a" - source = "github.com/mnottale/cli" + revision = "95a9b4d5fe5d7557f26946adb8d7b546e6a4f130" [[projects]] name = "github.com/docker/distribution" packages = [ ".", - "context", "digestset", "manifest", "manifest/manifestlist", "manifest/schema2", + "metrics", "reference", "registry/api/errcode", "registry/api/v2", @@ -126,10 +132,9 @@ "registry/storage/cache/memory", "uuid" ] - revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" + revision = "83389a148052d74ac602f5f1d62f86ff2f3c4aa5" [[projects]] - branch = "master" name = "github.com/docker/docker" packages = [ "api", @@ -170,17 +175,16 @@ "registry", "registry/resumable" ] - revision = "1a57535aa277e0f2a3c1922c736551148c5b4351" + revision = "c752b0991e31ba9869ab6a0661af57e9423874fb" [[projects]] name = "github.com/docker/docker-credential-helpers" packages = [ "client", - "credentials", - "pass" + "credentials" ] - revision = "d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1" - version = "v0.6.0" + revision = "5241b46610f2491efdf9d1c85f1ddf5b02f6d962" + version = "v0.6.1" [[projects]] name = "github.com/docker/go" @@ -204,6 +208,11 @@ packages = ["."] revision = "9461782956ad83b30282bf90e31fa6a70c255ba9" +[[projects]] + name = "github.com/docker/go-metrics" + packages = ["."] + revision = "d466d4f6fd960e01820085bd7e1a24426ee7ef18" + [[projects]] name = "github.com/docker/go-units" packages = ["."] @@ -220,7 +229,7 @@ "manager/raftselector", "protobuf/plugin" ] - revision = "49a9d7f6ba3c1925262641e694c18eb43575f74b" + revision = "edd5641391926a50bc5f7040e20b7efc05003c26" [[projects]] name = "github.com/emicklei/go-restful" @@ -342,6 +351,15 @@ revision = "53c1911da2b537f792e7cafcb446b05ffe33b996" version = "v1.6.1" +[[projects]] + branch = "master" + name = "github.com/hashicorp/golang-lru" + packages = [ + ".", + "simplelru" + ] + revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" + [[projects]] branch = "master" name = "github.com/howeyc/gopass" @@ -382,6 +400,12 @@ revision = "02e3cf038dcea8290e44424da473dd12be796a8a" version = "v1.0.3" +[[projects]] + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + [[projects]] branch = "master" name = "github.com/miekg/pkcs11" @@ -406,6 +430,12 @@ revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f" version = "1.0.0" +[[projects]] + branch = "master" + name = "github.com/morikuni/aec" + packages = ["."] + revision = "39771216ff4c63d11f5e604076f9c45e8be1067b" + [[projects]] name = "github.com/opencontainers/go-digest" packages = ["."] @@ -436,6 +466,39 @@ revision = "645ef00459ed84a119197bfb8d8205042c6df63d" version = "v0.8.0" +[[projects]] + name = "github.com/prometheus/client_golang" + packages = ["prometheus"] + revision = "c5b7fccd204277076155f10851dad72b76a49317" + version = "v0.8.0" + +[[projects]] + branch = "master" + name = "github.com/prometheus/client_model" + packages = ["go"] + revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" + +[[projects]] + branch = "master" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model" + ] + revision = "7600349dcfe1abd18d72d3a1770870d9800a7801" + 
+[[projects]] + branch = "master" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs" + ] + revision = "94663424ae5ae9856b40a9f170762b4197024661" + [[projects]] name = "github.com/sirupsen/logrus" packages = ["."] @@ -665,9 +728,6 @@ "pkg/api/errors", "pkg/api/meta", "pkg/api/resource", - "pkg/apimachinery", - "pkg/apimachinery/announced", - "pkg/apimachinery/registered", "pkg/apis/meta/internalversion", "pkg/apis/meta/v1", "pkg/apis/meta/v1/unstructured", @@ -687,6 +747,7 @@ "pkg/runtime/serializer/versioning", "pkg/selection", "pkg/types", + "pkg/util/cache", "pkg/util/clock", "pkg/util/diff", "pkg/util/errors", @@ -747,17 +808,21 @@ "rest", "rest/watch", "tools/auth", + "tools/cache", "tools/clientcmd", "tools/clientcmd/api", "tools/clientcmd/api/latest", "tools/clientcmd/api/v1", "tools/metrics", + "tools/pager", "tools/reference", "transport", + "util/buffer", "util/cert", "util/flowcontrol", "util/homedir", - "util/integer" + "util/integer", + "util/retry" ] revision = "23781f4d6632d88e869066eaebb743857aa1ef9b" version = "v7.0.0" @@ -770,7 +835,7 @@ [[projects]] name = "k8s.io/kubernetes" - packages = ["pkg/api"] + packages = ["pkg/api/v1/pod"] revision = "v1.8.2" [[projects]] @@ -782,6 +847,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "f716205fe9433cc490be04ab21d6e8aac807029bde23d8b41aaf4499b72fa5b9" + inputs-digest = "3918387fed27aaa51dd96369ee34946ca702df33fc58d7157f90b682149ce8b3" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index e5b2f5b95..d53c45d1e 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -1,4 +1,3 @@ - # Gopkg.toml example # # Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md @@ -30,14 +29,9 @@ required = ["github.com/wadey/gocovmerge"] name = "gopkg.in/yaml.v2" version = "2.2.1" -[[override]] +[[constraint]] name = "github.com/docker/cli" branch = "master" - source = "github.com/mnottale/cli" - -[[override]] - name = "github.com/spf13/pflag" - revision = "3ebe029320b2676d667ae88da602a5f854788a8a" [[constraint]] name = "github.com/sirupsen/logrus" @@ -59,13 +53,17 @@ required = ["github.com/wadey/gocovmerge"] name = "github.com/docker/go-metrics" revision = "d466d4f6fd960e01820085bd7e1a24426ee7ef18" +[[override]] + name = "github.com/docker/docker" + revision = "c752b0991e31ba9869ab6a0661af57e9423874fb" + [[override]] name = "github.com/docker/distribution" - revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" + revision = "83389a148052d74ac602f5f1d62f86ff2f3c4aa5" [[override]] name = "github.com/docker/swarmkit" - revision = "49a9d7f6ba3c1925262641e694c18eb43575f74b" + revision = "edd5641391926a50bc5f7040e20b7efc05003c26" [[override]] name = "google.golang.org/grpc" diff --git a/cmd/docker-app/deploy.go b/cmd/docker-app/deploy.go index 412831fe5..1f8f05ff6 100644 --- a/cmd/docker-app/deploy.go +++ b/cmd/docker-app/deploy.go @@ -1,20 +1,15 @@ package main import ( - "context" - "fmt" - "os" - "github.com/docker/app/internal" "github.com/docker/app/internal/packager" "github.com/docker/app/internal/renderer" "github.com/docker/cli/cli" "github.com/docker/cli/cli/command" - "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack" "github.com/docker/cli/cli/command/stack/options" - "github.com/docker/cli/cli/command/stack/swarm" - cliflags "github.com/docker/cli/cli/flags" "github.com/spf13/cobra" + "github.com/spf13/pflag" ) type deployOptions struct { @@ -37,7 +32,7 @@ func deployCmd(dockerCli 
*command.DockerCli) *cobra.Command { Long: `Deploy the application on either Swarm or Kubernetes.`, Args: cli.RequiresMaxArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - return runDeploy(dockerCli, firstOrEmpty(args), opts) + return runDeploy(dockerCli, cmd.Flags(), firstOrEmpty(args), opts) }, } @@ -53,18 +48,15 @@ func deployCmd(dockerCli *command.DockerCli) *cobra.Command { return cmd } -func runDeploy(dockerCli *command.DockerCli, appname string, opts deployOptions) error { +func runDeploy(dockerCli *command.DockerCli, flags *pflag.FlagSet, appname string, opts deployOptions) error { appname, cleanup, err := packager.Extract(appname) if err != nil { return err } defer cleanup() - deployOrchestrator := opts.deployOrchestrator - if do, ok := os.LookupEnv("DOCKER_ORCHESTRATOR"); ok { - deployOrchestrator = do - } - if deployOrchestrator != "swarm" && deployOrchestrator != "kubernetes" { - return fmt.Errorf("orchestrator must be either 'swarm' or 'kubernetes'") + deployOrchestrator, err := command.GetStackOrchestrator(opts.deployOrchestrator, dockerCli.ConfigFile().StackOrchestrator, dockerCli.Err()) + if err != nil { + return err } d, err := parseSettings(opts.deployEnv) if err != nil { @@ -74,28 +66,11 @@ func runDeploy(dockerCli *command.DockerCli, appname string, opts deployOptions) if err != nil { return err } - dockerCli.Initialize(&cliflags.ClientOptions{ - Common: &cliflags.CommonOptions{ - Orchestrator: deployOrchestrator, - }, - }) stackName := opts.deployStackName if stackName == "" { stackName = internal.AppNameFromDir(appname) } - if deployOrchestrator == "swarm" { - ctx := context.Background() - return swarm.DeployCompose(ctx, dockerCli, rendered, options.Deploy{ - Namespace: stackName, - }) - } - // kube mode - kubeCli, err := kubernetes.WrapCli(dockerCli, kubernetes.Options{ - Namespace: opts.deployNamespace, - Config: opts.deployKubeConfig, + return stack.RunDeploy(dockerCli, flags, rendered, deployOrchestrator, options.Deploy{ + Namespace: stackName, }) - if err != nil { - return err - } - return kubernetes.DeployStack(kubeCli, options.Deploy{Namespace: stackName}, rendered) } diff --git a/e2e/testdata/helm-expected.chart/templates/stack-v1beta1.yaml b/e2e/testdata/helm-expected.chart/templates/stack-v1beta1.yaml index 0bfd96ba0..f4d94c249 100644 --- a/e2e/testdata/helm-expected.chart/templates/stack-v1beta1.yaml +++ b/e2e/testdata/helm-expected.chart/templates/stack-v1beta1.yaml @@ -10,7 +10,7 @@ objectmeta: initializers: null labels: {} name: helm - namespace: default + namespace: "" ownerreferences: [] resourceversion: "" selflink: "" diff --git a/e2e/testdata/helm-expected.chart/templates/stack.yaml b/e2e/testdata/helm-expected.chart/templates/stack.yaml index d8340ed6a..8bf42c669 100644 --- a/e2e/testdata/helm-expected.chart/templates/stack.yaml +++ b/e2e/testdata/helm-expected.chart/templates/stack.yaml @@ -12,7 +12,7 @@ metadata: initializers: null labels: {} name: helm - namespace: default + namespace: "" ownerreferences: [] resourceversion: "" selflink: "" diff --git a/internal/renderer/helm.go b/internal/renderer/helm.go index 662d7c8a3..7381606eb 100644 --- a/internal/renderer/helm.go +++ b/internal/renderer/helm.go @@ -14,7 +14,7 @@ import ( "github.com/docker/app/internal/templateloader" "github.com/docker/app/internal/templatev1beta2" "github.com/docker/app/internal/types" - conversion "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/kubernetes" "github.com/docker/cli/cli/compose/loader" 
"github.com/docker/cli/kubernetes/compose/v1beta1" "github.com/docker/cli/kubernetes/compose/v1beta2" @@ -215,40 +215,47 @@ func makeChart(appname, targetDir string) error { return ioutil.WriteFile(filepath.Join(targetDir, "Chart.yaml"), hmetadata, 0644) } +func typeMeta(stackVersion string) metav1.TypeMeta { + return metav1.TypeMeta{ + Kind: "stacks.compose.docker.com", + APIVersion: stackVersion, + } +} + +func objectMeta(appname string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Name: internal.AppNameFromDir(appname), + } +} + func helmRender(appname string, targetDir string, composeFiles []string, settingsFile []string, env map[string]string, stackVersion string) error { rendered, err := Render(appname, composeFiles, settingsFile, env) if err != nil { return err } + converter, err := kubernetes.NewStackConverter(stackVersion) + if err != nil { + return err + } + name := internal.AppNameFromDir(appname) + s, err := converter.FromCompose(ioutil.Discard, name, rendered) + if err != nil { + return err + } var stack interface{} switch stackVersion { case V1Beta2: - stackSpec := conversion.FromComposeConfig(rendered) stack = v1beta2.Stack{ - TypeMeta: metav1.TypeMeta{ - Kind: "stacks.compose.docker.com", - APIVersion: V1Beta2, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: internal.AppNameFromDir(appname), - }, - Spec: stackSpec, + TypeMeta: typeMeta(stackVersion), + ObjectMeta: objectMeta(appname), + Spec: s.Spec, } case V1Beta1: - composeFile, err := yaml.Marshal(rendered) - if err != nil { - return err - } stack = v1beta1.Stack{ - TypeMeta: metav1.TypeMeta{ - Kind: "stacks.compose.docker.com", - APIVersion: V1Beta1, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: internal.AppNameFromDir(appname), - }, + TypeMeta: typeMeta(stackVersion), + ObjectMeta: objectMeta(appname), Spec: v1beta1.StackSpec{ - ComposeFile: string(composeFile), + ComposeFile: s.ComposeFile, }, } default: @@ -277,15 +284,9 @@ func makeStack(appname string, targetDir string, data []byte, stackVersion strin case V1Beta2: stackSpec := templateconversion.FromComposeConfig(rendered) stack = templatev1beta2.Stack{ - TypeMeta: metav1.TypeMeta{ - Kind: "stacks.compose.docker.com", - APIVersion: V1Beta2, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: internal.AppNameFromDir(appname), - Namespace: "default", // FIXME - }, - Spec: stackSpec, + TypeMeta: typeMeta(stackVersion), + ObjectMeta: objectMeta(appname), + Spec: stackSpec, } case V1Beta1: composeFile, err := yaml.Marshal(rendered) @@ -293,14 +294,8 @@ func makeStack(appname string, targetDir string, data []byte, stackVersion strin return err } stack = v1beta1.Stack{ - TypeMeta: metav1.TypeMeta{ - Kind: "stacks.compose.docker.com", - APIVersion: V1Beta1, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: internal.AppNameFromDir(appname), - Namespace: "default", // FIXME - }, + TypeMeta: typeMeta(stackVersion), + ObjectMeta: objectMeta(appname), Spec: v1beta1.StackSpec{ ComposeFile: string(composeFile), }, diff --git a/internal/renderer/render.go b/internal/renderer/render.go index a39ac4264..9c0c46ea6 100644 --- a/internal/renderer/render.go +++ b/internal/renderer/render.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "os" "path/filepath" + "regexp" "strings" "text/template" @@ -13,11 +14,24 @@ import ( "github.com/docker/app/internal" "github.com/docker/app/internal/yatee" "github.com/docker/cli/cli/compose/loader" + composetemplate "github.com/docker/cli/cli/compose/template" composetypes "github.com/docker/cli/cli/compose/types" "github.com/pkg/errors" "gopkg.in/yaml.v2" ) +var ( 
+ delimiter = "\\$" + substitution = "[_a-z][._a-z0-9]*(?::?[-?][^}]*)?" + + patternString = fmt.Sprintf( + "%s(?i:(?P%s)|(?P%s)|{(?P%s)}|(?P))", + delimiter, delimiter, substitution, substitution, + ) + + pattern = regexp.MustCompile(patternString) +) + //flattenYAML reads a YAML file and return a flattened view func flattenYAML(content []byte) (map[string]string, error) { in := make(map[interface{}]interface{}) @@ -218,11 +232,59 @@ func Render(appname string, composeFiles []string, settingsFile []string, env ma } configFiles = append(configFiles, composetypes.ConfigFile{Config: parsed}) } + return render(configFiles, finalEnv) +} + +func render(configFiles []composetypes.ConfigFile, finalEnv map[string]string) (*composetypes.Config, error) { rendered, err := loader.Load(composetypes.ConfigDetails{ - WorkingDir: ".", - ConfigFiles: configFiles, - Environment: finalEnv, - ErrOnMissingVariable: true, + WorkingDir: ".", + ConfigFiles: configFiles, + Environment: finalEnv, + }, func(opts *loader.Options) { + opts.Interpolate.Substitute = substitute }) - return rendered, errors.Wrap(err, "failed to load Compose file") + if err != nil { + return nil, errors.Wrap(err, "failed to load Compose file") + } + processEnabled(rendered) + return rendered, nil +} + +func substitute(template string, mapping composetemplate.Mapping) (string, error) { + return composetemplate.SubstituteWith(template, mapping, pattern, errorIfMissing) +} + +func errorIfMissing(substitution string, mapping composetemplate.Mapping) (string, bool, error) { + value, found := mapping(substitution) + if !found { + return "", true, &composetemplate.InvalidTemplateError{ + Template: "required variable" + substitution + "is missing a value", + } + } + return value, true, nil +} + +func processEnabled(config *composetypes.Config) { + services := []composetypes.ServiceConfig{} + for _, service := range config.Services { + if service.Extras != nil { + if xEnabled, ok := service.Extras["x-enabled"]; ok && !isEnabled(xEnabled.(string)) { + continue + } + } + services = append(services, service) + } + config.Services = services +} + +func isEnabled(e string) bool { + e = strings.ToLower(e) + switch { + case e == "", e == "0", e == "false": + return false + case strings.HasPrefix(e, "!"): + return !isEnabled(e[1:]) + default: + return true + } } diff --git a/internal/renderer/render_test.go b/internal/renderer/render_test.go new file mode 100644 index 000000000..7db1b397a --- /dev/null +++ b/internal/renderer/render_test.go @@ -0,0 +1,55 @@ +package renderer + +import ( + "testing" + + composetypes "github.com/docker/cli/cli/compose/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestRenderMissingValue(t *testing.T) { + configFiles := []composetypes.ConfigFile{ + { + Config: map[string]interface{}{ + "version": "3", + "services": map[string]interface{}{ + "foo": map[string]interface{}{ + "image": "${imageName}:${version}", + }, + }, + }, + }, + } + finalEnv := map[string]string{ + "imageName": "foo", + } + _, err := render(configFiles, finalEnv) + assert.Check(t, err != nil) + assert.Check(t, is.ErrorContains(err, "required variable")) +} + +func TestRender(t *testing.T) { + configFiles := []composetypes.ConfigFile{ + { + Config: map[string]interface{}{ + "version": "3", + "services": map[string]interface{}{ + "foo": map[string]interface{}{ + "image": "busybox:${version}", + "command": []interface{}{"-text", "${foo.bar}"}, + }, + }, + }, + }, + } + finalEnv := map[string]string{ + "version": "latest", + "foo.bar": 
"baz", + } + c, err := render(configFiles, finalEnv) + assert.NilError(t, err) + assert.Check(t, is.Len(c.Services, 1)) + assert.Check(t, is.Equal(c.Services[0].Image, "busybox:latest")) + assert.Check(t, is.DeepEqual([]string(c.Services[0].Command), []string{"-text", "baz"})) +} diff --git a/internal/templateloader/loader.go b/internal/templateloader/loader.go index c269d9e75..e4781a001 100644 --- a/internal/templateloader/loader.go +++ b/internal/templateloader/loader.go @@ -43,7 +43,7 @@ func ParseYAML(source []byte) (map[string]interface{}, error) { } func processEnabled(configDict map[string]interface{}) bool { - if v, ok := configDict["enabled"]; ok { + if v, ok := configDict["x-enabled"]; ok { e := fmt.Sprintf("%v", v) e = strings.Trim(e, " ") reverse := len(e) != 0 && e[0] == '!' @@ -57,7 +57,7 @@ func processEnabled(configDict map[string]interface{}) bool { if disabled { return false } - delete(configDict, "enabled") + delete(configDict, "x-enabled") } for k, v := range configDict { if m, ok := v.(map[string]interface{}); ok { diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 000000000..339177be6 --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 000000000..d7d14f8eb --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. 
+type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. 
+func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? 
+ } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS index 39a457602..bc1edce71 100644 --- a/vendor/github.com/docker/cli/AUTHORS +++ b/vendor/github.com/docker/cli/AUTHORS @@ -26,6 +26,7 @@ Alex Mavrogiannis Alexander Boyd Alexander Larsson Alexander Morozov +Alexander Ryabov Alexandre González Alfred Landrum Alicia Lauerman @@ -69,6 +70,7 @@ Bill Wang Bin Liu Bingshen Wang Boaz Shuster +Bogdan Anton Boris Pruessmann Bradley Cicenas Brandon Philips @@ -139,6 +141,7 @@ Dattatraya Kumbhar Dave Goodchild Dave Henderson Dave Tucker +David Beitey David Calavera David Cramer David Dooling @@ -222,6 +225,7 @@ Harry Zhang He Simei Helen Xie Henning Sprang +Henry N Hernan Garcia Hongbin Lu Hu Keping @@ -329,6 +333,7 @@ Kenfe-Mickaël Laventure Kevin Burke Kevin Feyrer Kevin Kern +Kevin Kirsche Kevin Meredith Kevin Richardson khaled souf @@ -367,6 +372,7 @@ Luca Marturana Lucas Chan Luka Hartwig Lukasz Zajaczkowski +Lydell Manganti Lénaïc Huard Ma Shimiao Mabin @@ -432,6 +438,7 @@ Máximo Cuadros Nace Oroz Nahum Shalman Nalin Dahyabhai +Nassim 'Nass' Eddequiouaq Natalie Parker Nate Brennand Nathan Hsieh @@ -463,7 +470,9 @@ Paul Weaver Pavel Pospisil Paweł Szczekutowicz Peeyush Gupta +Per Lundberg Peter Edge +Peter Hsu Peter Jaffe Peter Nagy Peter Salvatore @@ -475,6 +484,7 @@ pidster pixelistik Pratik Karki Prayag Verma +Preston Cowley Pure White Qiang Huang Qinglan Peng @@ -540,7 +550,7 @@ Srini Brahmaroutu Stefan S. 
Stefan Scherer Stefan Weil -Stephen Day +Stephen Day Stephen Rust Steve Durrheimer Steven Burgess diff --git a/vendor/github.com/docker/cli/cli/cobra.go b/vendor/github.com/docker/cli/cli/cobra.go index e97abd2a1..03cdfcc57 100644 --- a/vendor/github.com/docker/cli/cli/cobra.go +++ b/vendor/github.com/docker/cli/cli/cobra.go @@ -17,12 +17,12 @@ func SetupRootCommand(rootCmd *cobra.Command) { cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) - cobra.AddTemplateFunc("useLine", UseLine) rootCmd.SetUsageTemplate(usageTemplate) rootCmd.SetHelpTemplate(helpTemplate) rootCmd.SetFlagErrorFunc(FlagErrorFunc) rootCmd.SetHelpCommand(helpCommand) + rootCmd.SetVersionTemplate("Docker version {{.Version}}\n") rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") @@ -99,20 +99,10 @@ func managementSubCommands(cmd *cobra.Command) []*cobra.Command { return cmds } -// UseLine returns the usage line for a command. This implementation is different -// from the default Command.UseLine in that it does not add a `[flags]` to the -// end of the line. -func UseLine(cmd *cobra.Command) string { - if cmd.HasParent() { - return cmd.Parent().CommandPath() + " " + cmd.Use - } - return cmd.Use -} - var usageTemplate = `Usage: -{{- if not .HasSubCommands}} {{ useLine . }}{{end}} -{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} +{{- if not .HasSubCommands}} {{.UseLine}}{{end}} +{{- if .HasSubCommands}} {{ .CommandPath}}{{- if .HasAvailableFlags}} [OPTIONS]{{end}} COMMAND{{end}} {{ .Short | trim }} diff --git a/vendor/github.com/docker/cli/cli/command/cli.go b/vendor/github.com/docker/cli/cli/command/cli.go index 55a6c436d..b17aaf238 100644 --- a/vendor/github.com/docker/cli/cli/command/cli.go +++ b/vendor/github.com/docker/cli/cli/command/cli.go @@ -1,6 +1,7 @@ package command import ( + "context" "io" "net" "net/http" @@ -28,7 +29,6 @@ import ( "github.com/theupdateframework/notary" notaryclient "github.com/theupdateframework/notary/client" "github.com/theupdateframework/notary/passphrase" - "golang.org/x/net/context" ) // Streams is an interface which exposes the standard input and output streams @@ -162,15 +162,18 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error { if err != nil { return err } - hasExperimental, err := isEnabled(cli.configFile.Experimental) + var experimentalValue string + // Environment variable always overrides configuration + if experimentalValue = os.Getenv("DOCKER_CLI_EXPERIMENTAL"); experimentalValue == "" { + experimentalValue = cli.configFile.Experimental + } + hasExperimental, err := isEnabled(experimentalValue) if err != nil { return errors.Wrap(err, "Experimental field") } - orchestrator := GetOrchestrator(hasExperimental, opts.Common.Orchestrator, cli.configFile.Orchestrator) cli.clientInfo = ClientInfo{ DefaultVersion: cli.client.ClientVersion(), HasExperimental: hasExperimental, - Orchestrator: orchestrator, } cli.initializeFromClient() return nil @@ -236,12 +239,6 @@ type ServerInfo struct { type ClientInfo struct { HasExperimental bool DefaultVersion string - Orchestrator Orchestrator -} - -// HasKubernetes checks if kubernetes orchestrator is enabled -func (c ClientInfo) HasKubernetes() bool { - return c.HasExperimental && c.Orchestrator == OrchestratorKubernetes } // NewDockerCli returns a DockerCli instance with IO 
output and error streams set by in, out and err. diff --git a/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go b/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go index 9ac1db7b6..d6389a14d 100644 --- a/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go +++ b/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go @@ -16,6 +16,15 @@ const ( defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}" defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}" defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}" + defaultBuildCacheVerboseFormat = ` +ID: {{.ID}} +Description: {{.Description}} +Mutable: {{.Mutable}} +Size: {{.Size}} +CreatedAt: {{.CreatedAt}} +LastUsedAt: {{.LastUsedAt}} +UsageCount: {{.UsageCount}} +` typeHeader = "TYPE" totalHeader = "TOTAL" @@ -34,6 +43,7 @@ type DiskUsageContext struct { Images []*types.ImageSummary Containers []*types.Container Volumes []*types.Volume + BuildCache []*types.BuildCache BuilderSize int64 } @@ -100,6 +110,7 @@ func (ctx *DiskUsageContext) Write() (err error) { err = ctx.contextFormat(tmpl, &diskUsageBuilderContext{ builderSize: ctx.BuilderSize, + buildCache: ctx.BuildCache, }) if err != nil { return err @@ -184,6 +195,13 @@ func (ctx *DiskUsageContext) verboseWrite() error { // And build cache fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize))) + + t := template.Must(template.New("buildcache").Parse(defaultBuildCacheVerboseFormat)) + + for _, v := range ctx.BuildCache { + t.Execute(ctx.Output, *v) + } + return nil } @@ -366,6 +384,7 @@ func (c *diskUsageVolumesContext) Reclaimable() string { type diskUsageBuilderContext struct { HeaderContext builderSize int64 + buildCache []*types.BuildCache } func (c *diskUsageBuilderContext) MarshalJSON() ([]byte, error) { @@ -377,11 +396,17 @@ func (c *diskUsageBuilderContext) Type() string { } func (c *diskUsageBuilderContext) TotalCount() string { - return "" + return fmt.Sprintf("%d", len(c.buildCache)) } func (c *diskUsageBuilderContext) Active() string { - return "" + numActive := 0 + for _, bc := range c.buildCache { + if bc.InUse { + numActive++ + } + } + return fmt.Sprintf("%d", numActive) } func (c *diskUsageBuilderContext) Size() string { @@ -389,5 +414,12 @@ func (c *diskUsageBuilderContext) Size() string { } func (c *diskUsageBuilderContext) Reclaimable() string { - return c.Size() + var inUseBytes int64 + for _, bc := range c.buildCache { + if bc.InUse { + inUseBytes += bc.Size + } + } + + return units.HumanSize(float64(c.builderSize - inUseBytes)) } diff --git a/vendor/github.com/docker/cli/cli/command/formatter/service.go b/vendor/github.com/docker/cli/cli/command/formatter/service.go index 35a5f1eba..5dde8006d 100644 --- a/vendor/github.com/docker/cli/cli/command/formatter/service.go +++ b/vendor/github.com/docker/cli/cli/command/formatter/service.go @@ -85,6 +85,9 @@ ContainerSpec: {{- if .ContainerWorkDir }} Dir: {{ .ContainerWorkDir }} {{- end -}} +{{- if .HasContainerInit }} + Init: {{ .ContainerInit }} +{{- end -}} {{- if .ContainerUser }} User: {{ .ContainerUser }} {{- end }} @@ -92,11 +95,23 @@ ContainerSpec: Mounts: {{- end }} {{- range $mount := .ContainerMounts }} - Target = {{ $mount.Target }} - Source = {{ $mount.Source }} - ReadOnly = {{ $mount.ReadOnly }} - Type = {{ $mount.Type }} + Target: {{ $mount.Target }} + 
Source: {{ $mount.Source }} + ReadOnly: {{ $mount.ReadOnly }} + Type: {{ $mount.Type }} {{- end -}} +{{- if .Configs}} +Configs: +{{- range $config := .Configs }} + Target: {{$config.File.Name}} + Source: {{$config.ConfigName}} +{{- end }}{{ end }} +{{- if .Secrets }} +Secrets: +{{- range $secret := .Secrets }} + Target: {{$secret.File.Name}} + Source: {{$secret.SecretName}} +{{- end }}{{ end }} {{- if .HasResources }} Resources: {{- if .HasResourceReservations }} @@ -200,6 +215,14 @@ func (ctx *serviceInspectContext) Labels() map[string]string { return ctx.Service.Spec.Labels } +func (ctx *serviceInspectContext) Configs() []*swarm.ConfigReference { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Configs +} + +func (ctx *serviceInspectContext) Secrets() []*swarm.SecretReference { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Secrets +} + func (ctx *serviceInspectContext) IsModeGlobal() bool { return ctx.Service.Spec.Mode.Global != nil } @@ -352,6 +375,14 @@ func (ctx *serviceInspectContext) ContainerUser() string { return ctx.Service.Spec.TaskTemplate.ContainerSpec.User } +func (ctx *serviceInspectContext) HasContainerInit() bool { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Init != nil +} + +func (ctx *serviceInspectContext) ContainerInit() bool { + return *ctx.Service.Spec.TaskTemplate.ContainerSpec.Init +} + func (ctx *serviceInspectContext) ContainerMounts() []mounttypes.Mount { return ctx.Service.Spec.TaskTemplate.ContainerSpec.Mounts } diff --git a/vendor/github.com/docker/cli/cli/command/formatter/stack.go b/vendor/github.com/docker/cli/cli/command/formatter/stack.go index 676bcc05f..965eaf60d 100644 --- a/vendor/github.com/docker/cli/cli/command/formatter/stack.go +++ b/vendor/github.com/docker/cli/cli/command/formatter/stack.go @@ -5,9 +5,14 @@ import ( ) const ( - defaultStackTableFormat = "table {{.Name}}\t{{.Services}}" + // KubernetesStackTableFormat is the default Kubernetes stack format + KubernetesStackTableFormat = "table {{.Name}}\t{{.Services}}\t{{.Orchestrator}}\t{{.Namespace}}" + // SwarmStackTableFormat is the default Swarm stack format + SwarmStackTableFormat = "table {{.Name}}\t{{.Services}}\t{{.Orchestrator}}" - stackServicesHeader = "SERVICES" + stackServicesHeader = "SERVICES" + stackOrchestrastorHeader = "ORCHESTRATOR" + stackNamespaceHeader = "NAMESPACE" ) // Stack contains deployed stack information. 
@@ -16,15 +21,10 @@ type Stack struct { Name string // Services is the number of the services Services int -} - -// NewStackFormat returns a format for use with a stack Context -func NewStackFormat(source string) Format { - switch source { - case TableFormatKey: - return defaultStackTableFormat - } - return Format(source) + // Orchestrator is the platform where the stack is deployed + Orchestrator string + // Namespace is the Kubernetes namespace assigned to the stack + Namespace string } // StackWrite writes formatted stacks using the Context @@ -48,8 +48,10 @@ type stackContext struct { func newStackContext() *stackContext { stackCtx := stackContext{} stackCtx.header = map[string]string{ - "Name": nameHeader, - "Services": stackServicesHeader, + "Name": nameHeader, + "Services": stackServicesHeader, + "Orchestrator": stackOrchestrastorHeader, + "Namespace": stackNamespaceHeader, } return &stackCtx } @@ -65,3 +67,11 @@ func (s *stackContext) Name() string { func (s *stackContext) Services() string { return strconv.Itoa(s.s.Services) } + +func (s *stackContext) Orchestrator() string { + return s.s.Orchestrator +} + +func (s *stackContext) Namespace() string { + return s.s.Namespace +} diff --git a/vendor/github.com/docker/cli/cli/command/idresolver/idresolver.go b/vendor/github.com/docker/cli/cli/command/idresolver/idresolver.go index 6088b64b5..3d1f71a09 100644 --- a/vendor/github.com/docker/cli/cli/command/idresolver/idresolver.go +++ b/vendor/github.com/docker/cli/cli/command/idresolver/idresolver.go @@ -1,7 +1,7 @@ package idresolver import ( - "golang.org/x/net/context" + "context" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" diff --git a/vendor/github.com/docker/cli/cli/command/node/cmd.go b/vendor/github.com/docker/cli/cli/command/node/cmd.go index f5d194352..f96c9a6bf 100644 --- a/vendor/github.com/docker/cli/cli/command/node/cmd.go +++ b/vendor/github.com/docker/cli/cli/command/node/cmd.go @@ -1,6 +1,7 @@ package node import ( + "context" "errors" "github.com/docker/cli/cli" @@ -8,7 +9,6 @@ import ( "github.com/docker/docker/api/types" apiclient "github.com/docker/docker/client" "github.com/spf13/cobra" - "golang.org/x/net/context" ) // NewNodeCommand returns a cobra command for `node` subcommands diff --git a/vendor/github.com/docker/cli/cli/command/node/inspect.go b/vendor/github.com/docker/cli/cli/command/node/inspect.go index ffb4efe45..0dcb5db9b 100644 --- a/vendor/github.com/docker/cli/cli/command/node/inspect.go +++ b/vendor/github.com/docker/cli/cli/command/node/inspect.go @@ -1,6 +1,7 @@ package node import ( + "context" "fmt" "strings" @@ -8,7 +9,6 @@ import ( "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/formatter" "github.com/spf13/cobra" - "golang.org/x/net/context" ) type inspectOptions struct { diff --git a/vendor/github.com/docker/cli/cli/command/node/list.go b/vendor/github.com/docker/cli/cli/command/node/list.go index 7dac79566..d35ed0ea8 100644 --- a/vendor/github.com/docker/cli/cli/command/node/list.go +++ b/vendor/github.com/docker/cli/cli/command/node/list.go @@ -1,6 +1,7 @@ package node import ( + "context" "sort" "github.com/docker/cli/cli" @@ -10,7 +11,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" "github.com/spf13/cobra" - "golang.org/x/net/context" "vbom.ml/util/sortorder" ) diff --git a/vendor/github.com/docker/cli/cli/command/node/ps.go b/vendor/github.com/docker/cli/cli/command/node/ps.go index 5212e596f..2450e6af3 100644 --- 
a/vendor/github.com/docker/cli/cli/command/node/ps.go +++ b/vendor/github.com/docker/cli/cli/command/node/ps.go @@ -1,6 +1,7 @@ package node import ( + "context" "strings" "github.com/docker/cli/cli" @@ -12,7 +13,6 @@ import ( "github.com/docker/docker/api/types/swarm" "github.com/pkg/errors" "github.com/spf13/cobra" - "golang.org/x/net/context" ) type psOptions struct { diff --git a/vendor/github.com/docker/cli/cli/command/node/remove.go b/vendor/github.com/docker/cli/cli/command/node/remove.go index d23ec2fb5..65e3cdc30 100644 --- a/vendor/github.com/docker/cli/cli/command/node/remove.go +++ b/vendor/github.com/docker/cli/cli/command/node/remove.go @@ -1,11 +1,10 @@ package node import ( + "context" "fmt" "strings" - "golang.org/x/net/context" - "github.com/docker/cli/cli" "github.com/docker/cli/cli/command" "github.com/docker/docker/api/types" diff --git a/vendor/github.com/docker/cli/cli/command/node/update.go b/vendor/github.com/docker/cli/cli/command/node/update.go index 017cf7dcb..dbae49c6f 100644 --- a/vendor/github.com/docker/cli/cli/command/node/update.go +++ b/vendor/github.com/docker/cli/cli/command/node/update.go @@ -1,6 +1,7 @@ package node import ( + "context" "fmt" "github.com/docker/cli/cli" @@ -10,7 +11,6 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" - "golang.org/x/net/context" ) var ( diff --git a/vendor/github.com/docker/cli/cli/command/orchestrator.go b/vendor/github.com/docker/cli/cli/command/orchestrator.go index d27bd6c1f..5f3e44620 100644 --- a/vendor/github.com/docker/cli/cli/command/orchestrator.go +++ b/vendor/github.com/docker/cli/cli/command/orchestrator.go @@ -2,6 +2,7 @@ package command import ( "fmt" + "io" "os" ) @@ -13,47 +14,64 @@ const ( OrchestratorKubernetes = Orchestrator("kubernetes") // OrchestratorSwarm orchestrator OrchestratorSwarm = Orchestrator("swarm") + // OrchestratorAll orchestrator + OrchestratorAll = Orchestrator("all") orchestratorUnset = Orchestrator("unset") - defaultOrchestrator = OrchestratorSwarm - envVarDockerOrchestrator = "DOCKER_ORCHESTRATOR" + defaultOrchestrator = OrchestratorSwarm + envVarDockerStackOrchestrator = "DOCKER_STACK_ORCHESTRATOR" + envVarDockerOrchestrator = "DOCKER_ORCHESTRATOR" ) -func normalize(flag string) Orchestrator { - switch flag { +// HasKubernetes returns true if defined orchestrator has Kubernetes capabilities. +func (o Orchestrator) HasKubernetes() bool { + return o == OrchestratorKubernetes || o == OrchestratorAll +} + +// HasSwarm returns true if defined orchestrator has Swarm capabilities. +func (o Orchestrator) HasSwarm() bool { + return o == OrchestratorSwarm || o == OrchestratorAll +} + +// HasAll returns true if defined orchestrator has both Swarm and Kubernetes capabilities. 
+func (o Orchestrator) HasAll() bool { + return o == OrchestratorAll +} + +func normalize(value string) (Orchestrator, error) { + switch value { case "kubernetes": - return OrchestratorKubernetes + return OrchestratorKubernetes, nil case "swarm": - return OrchestratorSwarm + return OrchestratorSwarm, nil + case "": + return orchestratorUnset, nil + case "all": + return OrchestratorAll, nil default: - return orchestratorUnset + return defaultOrchestrator, fmt.Errorf("specified orchestrator %q is invalid, please use either kubernetes, swarm or all", value) } } -// GetOrchestrator checks DOCKER_ORCHESTRATOR environment variable and configuration file +// GetStackOrchestrator checks DOCKER_STACK_ORCHESTRATOR environment variable and configuration file // orchestrator value and returns user defined Orchestrator. -func GetOrchestrator(isExperimental bool, flagValue, value string) Orchestrator { - // Non experimental CLI has kubernetes disabled - if !isExperimental { - return defaultOrchestrator - } +func GetStackOrchestrator(flagValue, value string, stderr io.Writer) (Orchestrator, error) { // Check flag - if o := normalize(flagValue); o != orchestratorUnset { - return o + if o, err := normalize(flagValue); o != orchestratorUnset { + return o, err } // Check environment variable - env := os.Getenv(envVarDockerOrchestrator) - if o := normalize(env); o != orchestratorUnset { - return o + env := os.Getenv(envVarDockerStackOrchestrator) + if env == "" && os.Getenv(envVarDockerOrchestrator) != "" { + fmt.Fprintf(stderr, "WARNING: experimental environment variable %s is set. Please use %s instead\n", envVarDockerOrchestrator, envVarDockerStackOrchestrator) } - // Check specified orchestrator - if o := normalize(value); o != orchestratorUnset { - return o + if o, err := normalize(env); o != orchestratorUnset { + return o, err } - - if value != "" { - fmt.Fprintf(os.Stderr, "Specified orchestrator %q is invalid. 
Please use either kubernetes or swarm\n", value) + // Check specified orchestrator + if o, err := normalize(value); o != orchestratorUnset { + return o, err } // Nothing set, use default orchestrator - return defaultOrchestrator + return defaultOrchestrator, nil } diff --git a/vendor/github.com/docker/cli/cli/command/registry.go b/vendor/github.com/docker/cli/cli/command/registry.go index cb9a3947d..084d2b605 100644 --- a/vendor/github.com/docker/cli/cli/command/registry.go +++ b/vendor/github.com/docker/cli/cli/command/registry.go @@ -2,6 +2,7 @@ package command import ( "bufio" + "context" "encoding/base64" "encoding/json" "fmt" @@ -16,7 +17,6 @@ import ( "github.com/docker/docker/pkg/term" "github.com/docker/docker/registry" "github.com/pkg/errors" - "golang.org/x/net/context" ) // ElectAuthServer returns the default registry to use (by asking the daemon) diff --git a/vendor/github.com/docker/cli/cli/command/service/create.go b/vendor/github.com/docker/cli/cli/command/service/create.go index 299cdc383..ec74eb43c 100644 --- a/vendor/github.com/docker/cli/cli/command/service/create.go +++ b/vendor/github.com/docker/cli/cli/command/service/create.go @@ -1,6 +1,7 @@ package service import ( + "context" "fmt" "github.com/docker/cli/cli" @@ -10,7 +11,6 @@ import ( "github.com/docker/docker/api/types/versions" "github.com/spf13/cobra" "github.com/spf13/pflag" - "golang.org/x/net/context" ) func newCreateCommand(dockerCli command.Cli) *cobra.Command { @@ -58,6 +58,8 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command { flags.SetAnnotation(flagDNSSearch, "version", []string{"1.25"}) flags.Var(&opts.hosts, flagHost, "Set one or more custom host-to-IP mappings (host:ip)") flags.SetAnnotation(flagHost, "version", []string{"1.25"}) + flags.BoolVar(&opts.init, flagInit, false, "Use an init inside each service container to forward signals and reap processes") + flags.SetAnnotation(flagInit, "version", []string{"1.37"}) flags.Var(cliopts.NewListOptsRef(&opts.resources.resGenericResources, ValidateSingleGenericResource), "generic-resource", "User defined resources") flags.SetAnnotation(flagHostAdd, "version", []string{"1.32"}) diff --git a/vendor/github.com/docker/cli/cli/command/service/helpers.go b/vendor/github.com/docker/cli/cli/command/service/helpers.go index f328c7059..eb508e85f 100644 --- a/vendor/github.com/docker/cli/cli/command/service/helpers.go +++ b/vendor/github.com/docker/cli/cli/command/service/helpers.go @@ -1,13 +1,13 @@ package service import ( + "context" "io" "io/ioutil" "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/service/progress" "github.com/docker/docker/pkg/jsonmessage" - "golang.org/x/net/context" ) // waitOnService waits for the service to converge. 
It outputs a progress bar, diff --git a/vendor/github.com/docker/cli/cli/command/service/inspect.go b/vendor/github.com/docker/cli/cli/command/service/inspect.go index 8a9ddefe0..7f988fae5 100644 --- a/vendor/github.com/docker/cli/cli/command/service/inspect.go +++ b/vendor/github.com/docker/cli/cli/command/service/inspect.go @@ -1,10 +1,9 @@ package service import ( + "context" "strings" - "golang.org/x/net/context" - "github.com/docker/cli/cli" "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/formatter" diff --git a/vendor/github.com/docker/cli/cli/command/service/list.go b/vendor/github.com/docker/cli/cli/command/service/list.go index 63adce1ec..db4b03769 100644 --- a/vendor/github.com/docker/cli/cli/command/service/list.go +++ b/vendor/github.com/docker/cli/cli/command/service/list.go @@ -1,6 +1,7 @@ package service import ( + "context" "fmt" "sort" @@ -14,7 +15,6 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" "github.com/spf13/cobra" - "golang.org/x/net/context" ) type listOptions struct { diff --git a/vendor/github.com/docker/cli/cli/command/service/logs.go b/vendor/github.com/docker/cli/cli/command/service/logs.go index 13f150b75..107c9d215 100644 --- a/vendor/github.com/docker/cli/cli/command/service/logs.go +++ b/vendor/github.com/docker/cli/cli/command/service/logs.go @@ -2,14 +2,13 @@ package service import ( "bytes" + "context" "fmt" "io" "sort" "strconv" "strings" - "golang.org/x/net/context" - "github.com/docker/cli/cli" "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/idresolver" diff --git a/vendor/github.com/docker/cli/cli/command/service/opts.go b/vendor/github.com/docker/cli/cli/command/service/opts.go index e8f9718c7..6a9591abc 100644 --- a/vendor/github.com/docker/cli/cli/command/service/opts.go +++ b/vendor/github.com/docker/cli/cli/command/service/opts.go @@ -1,6 +1,7 @@ package service import ( + "context" "fmt" "sort" "strconv" @@ -18,7 +19,6 @@ import ( gogotypes "github.com/gogo/protobuf/types" "github.com/pkg/errors" "github.com/spf13/pflag" - "golang.org/x/net/context" ) type int64Value interface { @@ -480,6 +480,7 @@ type serviceOptions struct { user string groups opts.ListOpts credentialSpec credentialSpecOpt + init bool stopSignal string tty bool readOnly bool @@ -624,6 +625,7 @@ func (options *serviceOptions) ToService(ctx context.Context, apiClient client.N TTY: options.tty, ReadOnly: options.readOnly, Mounts: options.mounts.Value(), + Init: &options.init, DNSConfig: &swarm.DNSConfig{ Nameservers: options.dns.GetAll(), Search: options.dnsSearch.GetAll(), @@ -645,7 +647,7 @@ func (options *serviceOptions) ToService(ctx context.Context, apiClient client.N }, Mode: serviceMode, UpdateConfig: options.update.updateConfig(flags), - RollbackConfig: options.update.rollbackConfig(flags), + RollbackConfig: options.rollback.rollbackConfig(flags), EndpointSpec: options.endpoint.ToEndpointSpec(), } @@ -875,6 +877,7 @@ const ( flagRollbackMonitor = "rollback-monitor" flagRollbackOrder = "rollback-order" flagRollbackParallelism = "rollback-parallelism" + flagInit = "init" flagStopGracePeriod = "stop-grace-period" flagStopSignal = "stop-signal" flagTTY = "tty" diff --git a/vendor/github.com/docker/cli/cli/command/service/parse.go b/vendor/github.com/docker/cli/cli/command/service/parse.go index 6f69cbb47..254107071 100644 --- a/vendor/github.com/docker/cli/cli/command/service/parse.go +++ b/vendor/github.com/docker/cli/cli/command/service/parse.go @@ -1,12 +1,13 @@ package 
service import ( + "context" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/client" "github.com/pkg/errors" - "golang.org/x/net/context" ) // ParseSecrets retrieves the secrets with the requested names and fills diff --git a/vendor/github.com/docker/cli/cli/command/service/progress/progress.go b/vendor/github.com/docker/cli/cli/command/service/progress/progress.go index adff48684..4b9cdd733 100644 --- a/vendor/github.com/docker/cli/cli/command/service/progress/progress.go +++ b/vendor/github.com/docker/cli/cli/command/service/progress/progress.go @@ -1,6 +1,7 @@ package progress import ( + "context" "errors" "fmt" "io" @@ -16,7 +17,6 @@ import ( "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/stringid" - "golang.org/x/net/context" ) var ( diff --git a/vendor/github.com/docker/cli/cli/command/service/ps.go b/vendor/github.com/docker/cli/cli/command/service/ps.go index a1da43e97..0220f7e04 100644 --- a/vendor/github.com/docker/cli/cli/command/service/ps.go +++ b/vendor/github.com/docker/cli/cli/command/service/ps.go @@ -1,6 +1,7 @@ package service import ( + "context" "strings" "github.com/docker/cli/cli" @@ -14,7 +15,6 @@ import ( "github.com/docker/docker/client" "github.com/pkg/errors" "github.com/spf13/cobra" - "golang.org/x/net/context" ) type psOptions struct { diff --git a/vendor/github.com/docker/cli/cli/command/service/remove.go b/vendor/github.com/docker/cli/cli/command/service/remove.go index 38833b2e7..ee810b038 100644 --- a/vendor/github.com/docker/cli/cli/command/service/remove.go +++ b/vendor/github.com/docker/cli/cli/command/service/remove.go @@ -1,6 +1,7 @@ package service import ( + "context" "fmt" "strings" @@ -8,7 +9,6 @@ import ( "github.com/docker/cli/cli/command" "github.com/pkg/errors" "github.com/spf13/cobra" - "golang.org/x/net/context" ) func newRemoveCommand(dockerCli command.Cli) *cobra.Command { diff --git a/vendor/github.com/docker/cli/cli/command/service/scale.go b/vendor/github.com/docker/cli/cli/command/service/scale.go index d38b01b4b..5b656a7f3 100644 --- a/vendor/github.com/docker/cli/cli/command/service/scale.go +++ b/vendor/github.com/docker/cli/cli/command/service/scale.go @@ -1,12 +1,11 @@ package service import ( + "context" "fmt" "strconv" "strings" - "golang.org/x/net/context" - "github.com/docker/cli/cli" "github.com/docker/cli/cli/command" "github.com/docker/docker/api/types" diff --git a/vendor/github.com/docker/cli/cli/command/service/trust.go b/vendor/github.com/docker/cli/cli/command/service/trust.go index c304a9912..b7453ccbb 100644 --- a/vendor/github.com/docker/cli/cli/command/service/trust.go +++ b/vendor/github.com/docker/cli/cli/command/service/trust.go @@ -1,6 +1,7 @@ package service import ( + "context" "encoding/hex" "github.com/docker/cli/cli/command" @@ -12,7 +13,6 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/theupdateframework/notary/tuf/data" - "golang.org/x/net/context" ) func resolveServiceImageDigestContentTrust(dockerCli command.Cli, service *swarm.ServiceSpec) error { diff --git a/vendor/github.com/docker/cli/cli/command/service/update.go b/vendor/github.com/docker/cli/cli/command/service/update.go index ca9752a65..3e380db23 100644 --- a/vendor/github.com/docker/cli/cli/command/service/update.go +++ b/vendor/github.com/docker/cli/cli/command/service/update.go @@ -1,6 +1,7 @@ package service import ( + "context" 
"fmt" "sort" "strings" @@ -19,7 +20,6 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" - "golang.org/x/net/context" ) func newUpdateCommand(dockerCli command.Cli) *cobra.Command { @@ -94,6 +94,8 @@ func newUpdateCommand(dockerCli command.Cli) *cobra.Command { flags.SetAnnotation(flagDNSSearchAdd, "version", []string{"1.25"}) flags.Var(&options.hosts, flagHostAdd, "Add a custom host-to-IP mapping (host:ip)") flags.SetAnnotation(flagHostAdd, "version", []string{"1.25"}) + flags.BoolVar(&options.init, flagInit, false, "Use an init inside each service container to forward signals and reap processes") + flags.SetAnnotation(flagInit, "version", []string{"1.37"}) // Add needs parsing, Remove only needs the key flags.Var(newListOptsVar(), flagGenericResourcesRemove, "Remove a Generic resource") @@ -235,6 +237,12 @@ func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, options *serviceOpti // nolint: gocyclo func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { + updateBoolPtr := func(flag string, field **bool) { + if flags.Changed(flag) { + b, _ := flags.GetBool(flag) + *field = &b + } + } updateString := func(flag string, field *string) { if flags.Changed(flag) { *field, _ = flags.GetString(flag) @@ -306,6 +314,7 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags updateString(flagWorkdir, &cspec.Dir) updateString(flagUser, &cspec.User) updateString(flagHostname, &cspec.Hostname) + updateBoolPtr(flagInit, &cspec.Init) if err := updateIsolation(flagIsolation, &cspec.Isolation); err != nil { return err } @@ -313,13 +322,14 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags return err } - if flags.Changed(flagLimitCPU) || flags.Changed(flagLimitMemory) { - taskResources().Limits = &swarm.Resources{} + if anyChanged(flags, flagLimitCPU, flagLimitMemory) { + taskResources().Limits = spec.TaskTemplate.Resources.Limits updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs) updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes) } - if flags.Changed(flagReserveCPU) || flags.Changed(flagReserveMemory) { - taskResources().Reservations = &swarm.Resources{} + + if anyChanged(flags, flagReserveCPU, flagReserveMemory) { + taskResources().Reservations = spec.TaskTemplate.Resources.Reservations updateInt64Value(flagReserveCPU, &task.Resources.Reservations.NanoCPUs) updateInt64Value(flagReserveMemory, &task.Resources.Reservations.MemoryBytes) } @@ -963,45 +973,99 @@ func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error return nil } +type hostMapping struct { + IPAddr string + Host string +} + // updateHosts performs a diff between existing host entries, entries to be // removed, and entries to be added. Host entries preserve the order in which they // were added, as the specification mentions that in case multiple entries for a // host exist, the first entry should be used (by default). +// +// Note that, even though unsupported by the the CLI, the service specs format +// allow entries with both a _canonical_ hostname, and one or more aliases +// in an entry (IP-address canonical_hostname [alias ...]) +// +// Entries can be removed by either a specific `:` mapping, +// or by `` alone: +// +// - If both IP-address and host-name is provided, the hostname is removed only +// from entries that match the given IP-address. 
+// - If only a host-name is provided, the hostname is removed from any entry it +// is part of (either as canonical host-name, or as alias). +// - If, after removing the host-name from an entry, no host-names remain in +// the entry, the entry itself is removed. +// +// For example, the list of host-entries before processing could look like this: +// +// hosts = &[]string{ +// "127.0.0.2 host3 host1 host2 host4", +// "127.0.0.1 host1 host4", +// "127.0.0.3 host1", +// "127.0.0.1 host1", +// } +// +// Removing `host1` removes every occurrence: +// +// hosts = &[]string{ +// "127.0.0.2 host3 host2 host4", +// "127.0.0.1 host4", +// } +// +// Removing `host1:127.0.0.1` on the other hand, only remove the host if the +// IP-address matches: +// +// hosts = &[]string{ +// "127.0.0.2 host3 host1 host2 host4", +// "127.0.0.1 host4", +// "127.0.0.3 host1", +// } func updateHosts(flags *pflag.FlagSet, hosts *[]string) error { - // Combine existing Hosts (in swarmkit format) with the host to add (convert to swarmkit format) - if flags.Changed(flagHostAdd) { - values := convertExtraHostsToSwarmHosts(flags.Lookup(flagHostAdd).Value.(*opts.ListOpts).GetAll()) - *hosts = append(*hosts, values...) - } - // Remove duplicate - *hosts = removeDuplicates(*hosts) - - keysToRemove := make(map[string]struct{}) + var toRemove []hostMapping if flags.Changed(flagHostRemove) { - var empty struct{} extraHostsToRemove := flags.Lookup(flagHostRemove).Value.(*opts.ListOpts).GetAll() for _, entry := range extraHostsToRemove { - key := strings.SplitN(entry, ":", 2)[0] - keysToRemove[key] = empty + v := strings.SplitN(entry, ":", 2) + if len(v) > 1 { + toRemove = append(toRemove, hostMapping{IPAddr: v[1], Host: v[0]}) + } else { + toRemove = append(toRemove, hostMapping{Host: v[0]}) + } } } - newHosts := []string{} + var newHosts []string for _, entry := range *hosts { - // Since this is in swarmkit format, we need to find the key, which is canonical_hostname of: + // Since this is in SwarmKit format, we need to find the key, which is canonical_hostname of: // IP_address canonical_hostname [aliases...] parts := strings.Fields(entry) - if len(parts) > 1 { - key := parts[1] - if _, exists := keysToRemove[key]; !exists { - newHosts = append(newHosts, entry) + if len(parts) == 0 { + continue + } + ip := parts[0] + hostNames := parts[1:] + for _, rm := range toRemove { + if rm.IPAddr != "" && rm.IPAddr != ip { + continue } - } else { - newHosts = append(newHosts, entry) + for i, h := range hostNames { + if h == rm.Host { + hostNames = append(hostNames[:i], hostNames[i+1:]...) + } + } + } + if len(hostNames) > 0 { + newHosts = append(newHosts, fmt.Sprintf("%s %s", ip, strings.Join(hostNames, " "))) } } - *hosts = newHosts + // Append new hosts (in SwarmKit format) + if flags.Changed(flagHostAdd) { + values := convertExtraHostsToSwarmHosts(flags.Lookup(flagHostAdd).Value.(*opts.ListOpts).GetAll()) + newHosts = append(newHosts, values...) 
+ } + *hosts = removeDuplicates(newHosts) return nil } diff --git a/vendor/github.com/docker/cli/cli/command/stack/cmd.go b/vendor/github.com/docker/cli/cli/command/stack/cmd.go new file mode 100644 index 000000000..0416885ee --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/stack/cmd.go @@ -0,0 +1,119 @@ +package stack + +import ( + "errors" + "fmt" + "io" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/config/configfile" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +var errUnsupportedAllOrchestrator = fmt.Errorf(`no orchestrator specified: use either "kubernetes" or "swarm"`) + +type commonOptions struct { + orchestrator command.Orchestrator +} + +// NewStackCommand returns a cobra command for `stack` subcommands +func NewStackCommand(dockerCli command.Cli) *cobra.Command { + var opts commonOptions + cmd := &cobra.Command{ + Use: "stack [OPTIONS]", + Short: "Manage Docker stacks", + Args: cli.NoArgs, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + orchestrator, err := getOrchestrator(dockerCli.ConfigFile(), cmd, dockerCli.Err()) + if err != nil { + return err + } + opts.orchestrator = orchestrator + hideOrchestrationFlags(cmd, orchestrator) + return checkSupportedFlag(cmd, orchestrator) + }, + + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "version": "1.25", + }, + } + defaultHelpFunc := cmd.HelpFunc() + cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) { + hideOrchestrationFlags(cmd, opts.orchestrator) + defaultHelpFunc(cmd, args) + }) + cmd.AddCommand( + newDeployCommand(dockerCli, &opts), + newListCommand(dockerCli, &opts), + newPsCommand(dockerCli, &opts), + newRemoveCommand(dockerCli, &opts), + newServicesCommand(dockerCli, &opts), + ) + flags := cmd.PersistentFlags() + flags.String("kubeconfig", "", "Kubernetes config file") + flags.SetAnnotation("kubeconfig", "kubernetes", nil) + flags.String("orchestrator", "", "Orchestrator to use (swarm|kubernetes|all)") + return cmd +} + +// NewTopLevelDeployCommand returns a command for `docker deploy` +func NewTopLevelDeployCommand(dockerCli command.Cli) *cobra.Command { + cmd := newDeployCommand(dockerCli, nil) + // Remove the aliases at the top level + cmd.Aliases = []string{} + cmd.Annotations = map[string]string{ + "experimental": "", + "version": "1.25", + } + return cmd +} + +func getOrchestrator(config *configfile.ConfigFile, cmd *cobra.Command, stderr io.Writer) (command.Orchestrator, error) { + var orchestratorFlag string + if o, err := cmd.Flags().GetString("orchestrator"); err == nil { + orchestratorFlag = o + } + return command.GetStackOrchestrator(orchestratorFlag, config.StackOrchestrator, stderr) +} + +func hideOrchestrationFlags(cmd *cobra.Command, orchestrator command.Orchestrator) { + cmd.Flags().VisitAll(func(f *pflag.Flag) { + if _, ok := f.Annotations["kubernetes"]; ok && !orchestrator.HasKubernetes() { + f.Hidden = true + } + if _, ok := f.Annotations["swarm"]; ok && !orchestrator.HasSwarm() { + f.Hidden = true + } + }) + for _, subcmd := range cmd.Commands() { + hideOrchestrationFlags(subcmd, orchestrator) + } +} + +func checkSupportedFlag(cmd *cobra.Command, orchestrator command.Orchestrator) error { + errs := []string{} + cmd.Flags().VisitAll(func(f *pflag.Flag) { + if !f.Changed { + return + } + if _, ok := f.Annotations["kubernetes"]; ok && !orchestrator.HasKubernetes() { + errs = append(errs, fmt.Sprintf(`"--%s" is only supported on a Docker cli with kubernetes 
features enabled`, f.Name)) + } + if _, ok := f.Annotations["swarm"]; ok && !orchestrator.HasSwarm() { + errs = append(errs, fmt.Sprintf(`"--%s" is only supported on a Docker cli with swarm features enabled`, f.Name)) + } + }) + for _, subcmd := range cmd.Commands() { + if err := checkSupportedFlag(subcmd, orchestrator); err != nil { + errs = append(errs, err.Error()) + } + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/stack/common.go b/vendor/github.com/docker/cli/cli/command/stack/common.go new file mode 100644 index 000000000..6de410da8 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/stack/common.go @@ -0,0 +1,31 @@ +package stack + +import ( + "fmt" + "strings" + "unicode" +) + +// validateStackName checks if the provided string is a valid stack name (namespace). +// It currently only does a rudimentary check if the string is empty, or consists +// of only whitespace and quoting characters. +func validateStackName(namespace string) error { + v := strings.TrimFunc(namespace, quotesOrWhitespace) + if v == "" { + return fmt.Errorf("invalid stack name: %q", namespace) + } + return nil +} + +func validateStackNames(namespaces []string) error { + for _, ns := range namespaces { + if err := validateStackName(ns); err != nil { + return err + } + } + return nil +} + +func quotesOrWhitespace(r rune) bool { + return unicode.IsSpace(r) || r == '"' || r == '\'' +} diff --git a/vendor/github.com/docker/cli/cli/command/stack/deploy.go b/vendor/github.com/docker/cli/cli/command/stack/deploy.go new file mode 100644 index 000000000..0165bbda1 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/stack/deploy.go @@ -0,0 +1,90 @@ +package stack + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/loader" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newDeployCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + var opts options.Deploy + + cmd := &cobra.Command{ + Use: "deploy [OPTIONS] STACK", + Aliases: []string{"up"}, + Short: "Deploy a new stack or update an existing stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Namespace = args[0] + if err := validateStackName(opts.Namespace); err != nil { + return err + } + + commonOrchestrator := command.OrchestratorSwarm // default for top-level deploy command + if common != nil { + commonOrchestrator = common.orchestrator + } + + switch { + case opts.Bundlefile == "" && len(opts.Composefiles) == 0: + return errors.Errorf("Please specify either a bundle file (with --bundle-file) or a Compose file (with --compose-file).") + case opts.Bundlefile != "" && len(opts.Composefiles) != 0: + return errors.Errorf("You cannot specify both a bundle file and a Compose file.") + case opts.Bundlefile != "": + if commonOrchestrator != command.OrchestratorSwarm { + return errors.Errorf("bundle files are not supported on another orchestrator than swarm.") + } + return swarm.DeployBundle(context.Background(), dockerCli, opts) + } + + config, err := loader.LoadComposefile(dockerCli, opts) + if err != nil { + return err + } + return RunDeploy(dockerCli, cmd.Flags(), 
config, commonOrchestrator, opts) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.Bundlefile, "bundle-file", "", "Path to a Distributed Application Bundle file") + flags.SetAnnotation("bundle-file", "experimental", nil) + flags.SetAnnotation("bundle-file", "swarm", nil) + flags.StringSliceVarP(&opts.Composefiles, "compose-file", "c", []string{}, "Path to a Compose file") + flags.SetAnnotation("compose-file", "version", []string{"1.25"}) + flags.BoolVar(&opts.SendRegistryAuth, "with-registry-auth", false, "Send registry authentication details to Swarm agents") + flags.SetAnnotation("with-registry-auth", "swarm", nil) + flags.BoolVar(&opts.Prune, "prune", false, "Prune services that are no longer referenced") + flags.SetAnnotation("prune", "version", []string{"1.27"}) + flags.SetAnnotation("prune", "swarm", nil) + flags.StringVar(&opts.ResolveImage, "resolve-image", swarm.ResolveImageAlways, + `Query the registry to resolve image digest and supported platforms ("`+swarm.ResolveImageAlways+`"|"`+swarm.ResolveImageChanged+`"|"`+swarm.ResolveImageNever+`")`) + flags.SetAnnotation("resolve-image", "version", []string{"1.30"}) + flags.SetAnnotation("resolve-image", "swarm", nil) + kubernetes.AddNamespaceFlag(flags) + return cmd +} + +// RunDeploy performs a stack deploy against the specified orchestrator +func RunDeploy(dockerCli command.Cli, flags *pflag.FlagSet, config *composetypes.Config, commonOrchestrator command.Orchestrator, opts options.Deploy) error { + switch { + case commonOrchestrator.HasAll(): + return errUnsupportedAllOrchestrator + case commonOrchestrator.HasKubernetes(): + kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(flags, commonOrchestrator)) + if err != nil { + return err + } + return kubernetes.RunDeploy(kli, opts, config) + default: + return swarm.RunDeploy(dockerCli, opts, config) + } +} diff --git a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/cli.go b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/cli.go index 27064ba08..bcd0b01e5 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/cli.go +++ b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/cli.go @@ -1,13 +1,13 @@ package kubernetes import ( - "os" - "path/filepath" + "fmt" + "net" + "net/url" "github.com/docker/cli/cli/command" "github.com/docker/cli/kubernetes" - "github.com/docker/docker/pkg/homedir" - "github.com/pkg/errors" + cliv1beta1 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1" flag "github.com/spf13/pflag" kubeclient "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" @@ -23,13 +23,16 @@ type KubeCli struct { // Options contains resolved parameters to initialize kubernetes clients type Options struct { - Namespace string - Config string + Namespace string + Config string + Orchestrator command.Orchestrator } // NewOptions returns an Options initialized with command line flags -func NewOptions(flags *flag.FlagSet) Options { - var opts Options +func NewOptions(flags *flag.FlagSet, orchestrator command.Orchestrator) Options { + opts := Options{ + Orchestrator: orchestrator, + } if namespace, err := flags.GetString("namespace"); err == nil { opts.Namespace = namespace } @@ -39,25 +42,29 @@ func NewOptions(flags *flag.FlagSet) Options { return opts } +// AddNamespaceFlag adds the namespace flag to the given flag set +func AddNamespaceFlag(flags *flag.FlagSet) { + flags.String("namespace", "", "Kubernetes namespace to use") + flags.SetAnnotation("namespace", "kubernetes", nil) +} + // WrapCli 
wraps command.Cli with kubernetes specifics func WrapCli(dockerCli command.Cli, opts Options) (*KubeCli, error) { - var err error cli := &KubeCli{ - Cli: dockerCli, - kubeNamespace: "default", + Cli: dockerCli, } - if opts.Namespace != "" { - cli.kubeNamespace = opts.Namespace - } - kubeConfig := opts.Config - if kubeConfig == "" { - if config := os.Getenv("KUBECONFIG"); config != "" { - kubeConfig = config - } else { - kubeConfig = filepath.Join(homedir.Get(), ".kube/config") + clientConfig := kubernetes.NewKubernetesConfig(opts.Config) + + cli.kubeNamespace = opts.Namespace + if opts.Namespace == "" { + configNamespace, _, err := clientConfig.Namespace() + if err != nil { + return nil, err } + cli.kubeNamespace = configNamespace } - config, err := kubernetes.NewKubernetesConfig(kubeConfig) + + config, err := clientConfig.ClientConfig() if err != nil { return nil, err } @@ -69,25 +76,51 @@ func WrapCli(dockerCli command.Cli, opts Options) (*KubeCli, error) { } cli.clientSet = clientSet + if opts.Orchestrator.HasAll() { + if err := cli.checkHostsMatch(); err != nil { + return nil, err + } + } return cli, nil } func (c *KubeCli) composeClient() (*Factory, error) { - return NewFactory(c.kubeNamespace, c.kubeConfig) + return NewFactory(c.kubeNamespace, c.kubeConfig, c.clientSet) } -func (c *KubeCli) stacks() (stackClient, error) { - version, err := kubernetes.GetStackAPIVersion(c.clientSet) +func (c *KubeCli) checkHostsMatch() error { + daemonEndpoint, err := url.Parse(c.Client().DaemonHost()) if err != nil { - return nil, err + return err } + kubeEndpoint, err := url.Parse(c.kubeConfig.Host) + if err != nil { + return err + } + if daemonEndpoint.Hostname() == kubeEndpoint.Hostname() { + return nil + } + // The daemon can be local in Docker for Desktop, e.g. "npipe", "unix", ... 
+ if daemonEndpoint.Scheme != "tcp" { + ips, err := net.LookupIP(kubeEndpoint.Hostname()) + if err != nil { + return err + } + for _, ip := range ips { + if ip.IsLoopback() { + return nil + } + } + } + fmt.Fprintf(c.Err(), "WARNING: Swarm and Kubernetes hosts do not match (docker host=%s, kubernetes host=%s).\n"+ + " Update $DOCKER_HOST (or pass -H), or use 'kubectl config use-context' to match.\n", daemonEndpoint.Hostname(), kubeEndpoint.Hostname()) + return nil +} - switch version { - case kubernetes.StackAPIV1Beta1: - return newStackV1Beta1(c.kubeConfig, c.kubeNamespace) - case kubernetes.StackAPIV1Beta2: - return newStackV1Beta2(c.kubeConfig, c.kubeNamespace) - default: - return nil, errors.Errorf("no supported Stack API version") +func (c *KubeCli) stacksv1beta1() (cliv1beta1.StackInterface, error) { + raw, err := newStackV1Beta1(c.kubeConfig, c.kubeNamespace) + if err != nil { + return nil, err } + return raw.stacks, nil } diff --git a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/client.go b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/client.go index a44bf4459..5024c9234 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/client.go +++ b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/client.go @@ -1,6 +1,10 @@ package kubernetes import ( + "github.com/docker/cli/kubernetes" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeclient "k8s.io/client-go/kubernetes" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" typesappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -13,10 +17,11 @@ type Factory struct { config *restclient.Config coreClientSet *corev1.CoreV1Client appsClientSet *appsv1beta2.AppsV1beta2Client + clientSet *kubeclient.Clientset } // NewFactory creates a kubernetes client factory -func NewFactory(namespace string, config *restclient.Config) (*Factory, error) { +func NewFactory(namespace string, config *restclient.Config, clientSet *kubeclient.Clientset) (*Factory, error) { coreClientSet, err := corev1.NewForConfig(config) if err != nil { return nil, err @@ -32,6 +37,7 @@ func NewFactory(namespace string, config *restclient.Config) (*Factory, error) { config: config, coreClientSet: coreClientSet, appsClientSet: appsClientSet, + clientSet: clientSet, }, nil } @@ -65,7 +71,33 @@ func (s *Factory) ReplicationControllers() corev1.ReplicationControllerInterface return s.coreClientSet.ReplicationControllers(s.namespace) } -// ReplicaSets return a client for kubernetes replace sets +// ReplicaSets returns a client for kubernetes replace sets func (s *Factory) ReplicaSets() typesappsv1beta2.ReplicaSetInterface { return s.appsClientSet.ReplicaSets(s.namespace) } + +// DaemonSets returns a client for kubernetes daemon sets +func (s *Factory) DaemonSets() typesappsv1beta2.DaemonSetInterface { + return s.appsClientSet.DaemonSets(s.namespace) +} + +// Stacks returns a client for Docker's Stack on Kubernetes +func (s *Factory) Stacks(allNamespaces bool) (StackClient, error) { + version, err := kubernetes.GetStackAPIVersion(s.clientSet) + if err != nil { + return nil, err + } + namespace := s.namespace + if allNamespaces { + namespace = metav1.NamespaceAll + } + + switch version { + case kubernetes.StackAPIV1Beta1: + return newStackV1Beta1(s.config, namespace) + case kubernetes.StackAPIV1Beta2: + return newStackV1Beta2(s.config, namespace) + default: + return nil, errors.Errorf("no supported Stack API version") + } +} diff --git 
a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/conversion.go b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/conversion.go index e5becda85..83986661e 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/conversion.go +++ b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/conversion.go @@ -2,10 +2,13 @@ package kubernetes import ( "fmt" + "sort" + "strings" "time" "github.com/docker/cli/cli/command/formatter" "github.com/docker/cli/kubernetes/labels" + "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" appsv1beta2 "k8s.io/api/apps/v1beta2" apiv1 "k8s.io/api/core/v1" @@ -65,13 +68,43 @@ func toSwarmProtocol(protocol apiv1.Protocol) swarm.PortConfigProtocol { return swarm.PortConfigProtocol("unknown") } -func fetchPods(namespace string, pods corev1.PodInterface) ([]apiv1.Pod, error) { - labelSelector := labels.SelectorForStack(namespace) - podsList, err := pods.List(metav1.ListOptions{LabelSelector: labelSelector}) +func fetchPods(stackName string, pods corev1.PodInterface, f filters.Args) ([]apiv1.Pod, error) { + services := f.Get("service") + // for existing script compatibility, support either or _ format + stackNamePrefix := stackName + "_" + for _, s := range services { + if strings.HasPrefix(s, stackNamePrefix) { + services = append(services, strings.TrimPrefix(s, stackNamePrefix)) + } + } + listOpts := metav1.ListOptions{LabelSelector: labels.SelectorForStack(stackName, services...)} + var result []apiv1.Pod + podsList, err := pods.List(listOpts) if err != nil { return nil, err } - return podsList.Items, nil + nodes := f.Get("node") + for _, pod := range podsList.Items { + if filterPod(pod, nodes) && + // name filter is done client side for matching partials + f.FuzzyMatch("name", stackNamePrefix+pod.Name) { + + result = append(result, pod) + } + } + return result, nil +} + +func filterPod(pod apiv1.Pod, nodes []string) bool { + if len(nodes) == 0 { + return true + } + for _, name := range nodes { + if pod.Spec.NodeName == name { + return true + } + } + return false } func getContainerImage(containers []apiv1.Container) string { @@ -121,56 +154,76 @@ const ( publishedOnRandomPortSuffix = "-random-ports" ) -// Replicas conversion -func replicasToServices(replicas *appsv1beta2.ReplicaSetList, services *apiv1.ServiceList) ([]swarm.Service, map[string]formatter.ServiceListInfo, error) { +func convertToServices(replicas *appsv1beta2.ReplicaSetList, daemons *appsv1beta2.DaemonSetList, services *apiv1.ServiceList) ([]swarm.Service, map[string]formatter.ServiceListInfo, error) { result := make([]swarm.Service, len(replicas.Items)) - infos := make(map[string]formatter.ServiceListInfo, len(replicas.Items)) + infos := make(map[string]formatter.ServiceListInfo, len(replicas.Items)+len(daemons.Items)) for i, r := range replicas.Items { - serviceName := r.Labels[labels.ForServiceName] - serviceHeadless, ok := findService(services, serviceName) - if !ok { - return nil, nil, fmt.Errorf("could not find service '%s'", serviceName) - } - stack, ok := serviceHeadless.Labels[labels.ForStackName] - if ok { - stack += "_" - } - uid := string(serviceHeadless.UID) - s := swarm.Service{ - ID: uid, - Spec: swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - Name: stack + serviceHeadless.Name, - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: &swarm.ContainerSpec{ - Image: getContainerImage(r.Spec.Template.Spec.Containers), - }, - }, - }, - } - if serviceNodePort, ok := findService(services, 
serviceName+publishedOnRandomPortSuffix); ok && serviceNodePort.Spec.Type == apiv1.ServiceTypeNodePort { - s.Endpoint = serviceEndpoint(serviceNodePort, swarm.PortConfigPublishModeHost) - } - if serviceLoadBalancer, ok := findService(services, serviceName+publishedServiceSuffix); ok && serviceLoadBalancer.Spec.Type == apiv1.ServiceTypeLoadBalancer { - s.Endpoint = serviceEndpoint(serviceLoadBalancer, swarm.PortConfigPublishModeIngress) + s, err := convertToService(r.Labels[labels.ForServiceName], services, r.Spec.Template.Spec.Containers) + if err != nil { + return nil, nil, err } - result[i] = s - infos[uid] = formatter.ServiceListInfo{ + result[i] = *s + infos[s.ID] = formatter.ServiceListInfo{ Mode: "replicated", Replicas: fmt.Sprintf("%d/%d", r.Status.AvailableReplicas, r.Status.Replicas), } } + for _, d := range daemons.Items { + s, err := convertToService(d.Labels[labels.ForServiceName], services, d.Spec.Template.Spec.Containers) + if err != nil { + return nil, nil, err + } + result = append(result, *s) + infos[s.ID] = formatter.ServiceListInfo{ + Mode: "global", + Replicas: fmt.Sprintf("%d/%d", d.Status.NumberReady, d.Status.DesiredNumberScheduled), + } + } + sort.Slice(result, func(i, j int) bool { + return result[i].ID < result[j].ID + }) return result, infos, nil } -func findService(services *apiv1.ServiceList, name string) (apiv1.Service, bool) { +func convertToService(serviceName string, services *apiv1.ServiceList, containers []apiv1.Container) (*swarm.Service, error) { + serviceHeadless, err := findService(services, serviceName) + if err != nil { + return nil, err + } + stack, ok := serviceHeadless.Labels[labels.ForStackName] + if ok { + stack += "_" + } + uid := string(serviceHeadless.UID) + s := &swarm.Service{ + ID: uid, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: stack + serviceHeadless.Name, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: getContainerImage(containers), + }, + }, + }, + } + if serviceNodePort, err := findService(services, serviceName+publishedOnRandomPortSuffix); err == nil && serviceNodePort.Spec.Type == apiv1.ServiceTypeNodePort { + s.Endpoint = serviceEndpoint(serviceNodePort, swarm.PortConfigPublishModeHost) + } + if serviceLoadBalancer, err := findService(services, serviceName+publishedServiceSuffix); err == nil && serviceLoadBalancer.Spec.Type == apiv1.ServiceTypeLoadBalancer { + s.Endpoint = serviceEndpoint(serviceLoadBalancer, swarm.PortConfigPublishModeIngress) + } + return s, nil +} + +func findService(services *apiv1.ServiceList, name string) (apiv1.Service, error) { for _, s := range services.Items { if s.Name == name { - return s, true + return s, nil } } - return apiv1.Service{}, false + return apiv1.Service{}, fmt.Errorf("could not find service '%s'", name) } func serviceEndpoint(service apiv1.Service, publishMode swarm.PortConfigPublishMode) swarm.Endpoint { diff --git a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/convert.go b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/convert.go index c1b4df0dd..b122090fb 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/convert.go +++ b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/convert.go @@ -1,18 +1,79 @@ package kubernetes import ( + "io" + "io/ioutil" "regexp" "strconv" "strings" "github.com/docker/cli/cli/compose/loader" + "github.com/docker/cli/cli/compose/schema" composeTypes "github.com/docker/cli/cli/compose/types" composetypes "github.com/docker/cli/cli/compose/types" 
"github.com/docker/cli/kubernetes/compose/v1beta1" "github.com/docker/cli/kubernetes/compose/v1beta2" + "github.com/pkg/errors" + yaml "gopkg.in/yaml.v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// NewStackConverter returns a converter from types.Config (compose) to the specified +// stack version or error out if the version is not supported or existent. +func NewStackConverter(version string) (StackConverter, error) { + switch version { + case "v1beta1": + return stackV1Beta1Converter{}, nil + case "v1beta2": + return stackV1Beta2Converter{}, nil + default: + return nil, errors.Errorf("stack version %s unsupported", version) + } +} + +// StackConverter converts a compose types.Config to a Stack +type StackConverter interface { + FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) +} + +type stackV1Beta1Converter struct{} + +func (s stackV1Beta1Converter) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) { + cfg.Version = v1beta1.MaxComposeVersion + st, err := fromCompose(stderr, name, cfg) + if err != nil { + return Stack{}, err + } + res, err := yaml.Marshal(cfg) + if err != nil { + return Stack{}, err + } + // reload the result to check that it produced a valid 3.5 compose file + resparsedConfig, err := loader.ParseYAML(res) + if err != nil { + return Stack{}, err + } + if err = schema.Validate(resparsedConfig, v1beta1.MaxComposeVersion); err != nil { + return Stack{}, errors.Wrapf(err, "the compose yaml file is invalid with v%s", v1beta1.MaxComposeVersion) + } + + st.ComposeFile = string(res) + return st, nil +} + +type stackV1Beta2Converter struct{} + +func (s stackV1Beta2Converter) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) { + return fromCompose(stderr, name, cfg) +} + +func fromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) { + return Stack{ + Name: name, + Spec: fromComposeConfig(stderr, cfg), + }, nil +} + func loadStackData(composefile string) (*composetypes.Config, error) { parsed, err := loader.ParseYAML([]byte(composefile)) if err != nil { @@ -28,49 +89,52 @@ func loadStackData(composefile string) (*composetypes.Config, error) { } // Conversions from internal stack to different stack compose component versions. 
-func stackFromV1beta1(in *v1beta1.Stack) (stack, error) { +func stackFromV1beta1(in *v1beta1.Stack) (Stack, error) { cfg, err := loadStackData(in.Spec.ComposeFile) if err != nil { - return stack{}, err + return Stack{}, err } - return stack{ - name: in.ObjectMeta.Name, - composeFile: in.Spec.ComposeFile, - spec: FromComposeConfig(cfg), + return Stack{ + Name: in.ObjectMeta.Name, + Namespace: in.ObjectMeta.Namespace, + ComposeFile: in.Spec.ComposeFile, + Spec: fromComposeConfig(ioutil.Discard, cfg), }, nil } -func stackToV1beta1(s stack) *v1beta1.Stack { +func stackToV1beta1(s Stack) *v1beta1.Stack { return &v1beta1.Stack{ ObjectMeta: metav1.ObjectMeta{ - Name: s.name, + Name: s.Name, }, Spec: v1beta1.StackSpec{ - ComposeFile: s.composeFile, + ComposeFile: s.ComposeFile, }, } } -func stackFromV1beta2(in *v1beta2.Stack) stack { - return stack{ - name: in.ObjectMeta.Name, - spec: in.Spec, +func stackFromV1beta2(in *v1beta2.Stack) Stack { + return Stack{ + Name: in.ObjectMeta.Name, + Namespace: in.ObjectMeta.Namespace, + Spec: in.Spec, } } -func stackToV1beta2(s stack) *v1beta2.Stack { +func stackToV1beta2(s Stack) *v1beta2.Stack { return &v1beta2.Stack{ ObjectMeta: metav1.ObjectMeta{ - Name: s.name, + Name: s.Name, }, - Spec: s.spec, + Spec: s.Spec, } } -func FromComposeConfig(c *composeTypes.Config) *v1beta2.StackSpec { +func fromComposeConfig(stderr io.Writer, c *composeTypes.Config) *v1beta2.StackSpec { if c == nil { return nil } + warnUnsupportedFeatures(stderr, c) serviceConfigs := make([]v1beta2.ServiceConfig, len(c.Services)) for i, s := range c.Services { serviceConfigs[i] = fromComposeServiceConfig(s) diff --git a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/deploy.go b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/deploy.go index 8a7e4cbbe..0f0218516 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/deploy.go +++ b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/deploy.go @@ -2,42 +2,29 @@ package kubernetes import ( "fmt" + "io" - "github.com/docker/cli/cli/command/stack/loader" + "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/stack/options" composetypes "github.com/docker/cli/cli/compose/types" - "github.com/pkg/errors" + "github.com/morikuni/aec" ) // RunDeploy is the kubernetes implementation of docker stack deploy -func RunDeploy(dockerCli *KubeCli, opts options.Deploy) error { - // Check arguments - if len(opts.Composefiles) == 0 { - return errors.Errorf("Please specify only one compose file (with --compose-file).") - } - - // Parse the compose file - cfg, err := loader.LoadComposefile(dockerCli, opts) - if err != nil { - return err - } - return DeployStack(dockerCli, opts, cfg) -} - -// DeployStack deploys a parsed stack to kubernetes -func DeployStack(dockerCli *KubeCli, opts options.Deploy, cfg *composetypes.Config) error { +func RunDeploy(dockerCli *KubeCli, opts options.Deploy, cfg *composetypes.Config) error { cmdOut := dockerCli.Out() + // Initialize clients - stacks, err := dockerCli.stacks() + composeClient, err := dockerCli.composeClient() if err != nil { return err } - composeClient, err := dockerCli.composeClient() + stacks, err := composeClient.Stacks(false) if err != nil { return err } - stack, err := stacks.FromCompose(opts.Namespace, *cfg) + stack, err := stacks.FromCompose(dockerCli.Err(), opts.Namespace, cfg) if err != nil { return err } @@ -45,10 +32,6 @@ func DeployStack(dockerCli *KubeCli, opts options.Deploy, cfg *composetypes.Conf configMaps := composeClient.ConfigMaps() 
secrets := composeClient.Secrets() services := composeClient.Services() - pods := composeClient.Pods() - watcher := DeployWatcher{ - Pods: pods, - } if err := stacks.IsColliding(services, stack); err != nil { return err @@ -67,10 +50,109 @@ func DeployStack(dockerCli *KubeCli, opts options.Deploy, cfg *composetypes.Conf } fmt.Fprintln(cmdOut, "Waiting for the stack to be stable and running...") + v1beta1Cli, err := dockerCli.stacksv1beta1() + if err != nil { + return err + } - <-watcher.Watch(stack.name, stack.getServices()) - - fmt.Fprintf(cmdOut, "Stack %s is stable and running\n\n", stack.name) + pods := composeClient.Pods() + watcher := &deployWatcher{ + stacks: v1beta1Cli, + pods: pods, + } + statusUpdates := make(chan serviceStatus) + displayDone := make(chan struct{}) + go func() { + defer close(displayDone) + display := newStatusDisplay(dockerCli.Out()) + for status := range statusUpdates { + display.OnStatus(status) + } + }() + err = watcher.Watch(stack.Name, stack.getServices(), statusUpdates) + close(statusUpdates) + <-displayDone + if err != nil { + return err + } + fmt.Fprintf(cmdOut, "\nStack %s is stable and running\n\n", stack.Name) return nil + +} + +type statusDisplay interface { + OnStatus(serviceStatus) +} +type metaServiceState string + +const ( + metaServiceStateReady = metaServiceState("Ready") + metaServiceStatePending = metaServiceState("Pending") + metaServiceStateFailed = metaServiceState("Failed") +) + +func metaStateFromStatus(status serviceStatus) metaServiceState { + switch { + case status.podsReady > 0: + return metaServiceStateReady + case status.podsPending > 0: + return metaServiceStatePending + default: + return metaServiceStateFailed + } +} + +type forwardOnlyStatusDisplay struct { + o *command.OutStream + states map[string]metaServiceState +} + +func (d *forwardOnlyStatusDisplay) OnStatus(status serviceStatus) { + state := metaStateFromStatus(status) + if d.states[status.name] != state { + d.states[status.name] = state + fmt.Fprintf(d.o, "%s: %s\n", status.name, state) + } +} + +type interactiveStatusDisplay struct { + o *command.OutStream + statuses []serviceStatus +} + +func (d *interactiveStatusDisplay) OnStatus(status serviceStatus) { + b := aec.EmptyBuilder + for ix := 0; ix < len(d.statuses); ix++ { + b = b.Up(1).EraseLine(aec.EraseModes.All) + } + b = b.Column(0) + fmt.Fprint(d.o, b.ANSI) + updated := false + for ix, s := range d.statuses { + if s.name == status.name { + d.statuses[ix] = status + s = status + updated = true + } + displayInteractiveServiceStatus(s, d.o) + } + if !updated { + d.statuses = append(d.statuses, status) + displayInteractiveServiceStatus(status, d.o) + } +} + +func displayInteractiveServiceStatus(status serviceStatus, o io.Writer) { + state := metaStateFromStatus(status) + totalFailed := status.podsFailed + status.podsSucceeded + status.podsUnknown + fmt.Fprintf(o, "%[1]s: %[2]s\t\t[pod status: %[3]d/%[6]d ready, %[4]d/%[6]d pending, %[5]d/%[6]d failed]\n", status.name, state, + status.podsReady, status.podsPending, totalFailed, status.podsTotal) +} + +func newStatusDisplay(o *command.OutStream) statusDisplay { + if !o.IsTerminal() { + return &forwardOnlyStatusDisplay{o: o, states: map[string]metaServiceState{}} + } + return &interactiveStatusDisplay{o: o} } diff --git a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/list.go b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/list.go index ec15c9d3f..240f4aa04 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/list.go +++ 
b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/list.go @@ -1,44 +1,46 @@ package kubernetes import ( - "sort" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/formatter" "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/config/configfile" + "github.com/pkg/errors" + core_v1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "vbom.ml/util/sortorder" ) -// RunList is the kubernetes implementation of docker stack ls -func RunList(dockerCli *KubeCli, opts options.List) error { - stacks, err := getStacks(dockerCli) - if err != nil { - return err - } - format := opts.Format - if len(format) == 0 { - format = formatter.TableFormatKey +// GetStacks lists the kubernetes stacks +func GetStacks(kubeCli *KubeCli, opts options.List) ([]*formatter.Stack, error) { + if opts.AllNamespaces || len(opts.Namespaces) == 0 { + if isAllNamespacesDisabled(kubeCli.ConfigFile().Kubernetes) { + opts.AllNamespaces = true + } + return getStacksWithAllNamespaces(kubeCli, opts) } - stackCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewStackFormat(format), - } - sort.Sort(byName(stacks)) - return formatter.StackWrite(stackCtx, stacks) + return getStacksWithNamespaces(kubeCli, opts, removeDuplicates(opts.Namespaces)) } -type byName []*formatter.Stack - -func (n byName) Len() int { return len(n) } -func (n byName) Swap(i, j int) { n[i], n[j] = n[j], n[i] } -func (n byName) Less(i, j int) bool { return sortorder.NaturalLess(n[i].Name, n[j].Name) } +func isAllNamespacesDisabled(kubeCliConfig *configfile.KubernetesConfig) bool { + return kubeCliConfig == nil || kubeCliConfig != nil && kubeCliConfig.AllNamespaces != "disabled" +} -func getStacks(kubeCli *KubeCli) ([]*formatter.Stack, error) { - stackSvc, err := kubeCli.stacks() +func getStacks(kubeCli *KubeCli, opts options.List) ([]*formatter.Stack, error) { + composeClient, err := kubeCli.composeClient() + if err != nil { + return nil, err + } + stackSvc, err := composeClient.Stacks(opts.AllNamespaces) if err != nil { return nil, err } - stacks, err := stackSvc.List(metav1.ListOptions{}) if err != nil { return nil, err @@ -46,9 +48,89 @@ func getStacks(kubeCli *KubeCli) ([]*formatter.Stack, error) { var formattedStacks []*formatter.Stack for _, stack := range stacks { formattedStacks = append(formattedStacks, &formatter.Stack{ - Name: stack.name, - Services: len(stack.getServices()), + Name: stack.Name, + Services: len(stack.getServices()), + Orchestrator: "Kubernetes", + Namespace: stack.Namespace, }) } return formattedStacks, nil } + +func getStacksWithAllNamespaces(kubeCli *KubeCli, opts options.List) ([]*formatter.Stack, error) { + stacks, err := getStacks(kubeCli, opts) + if !apierrs.IsForbidden(err) { + return stacks, err + } + namespaces, err2 := getUserVisibleNamespaces(*kubeCli) + if err2 != nil { + return nil, errors.Wrap(err2, "failed to query user visible namespaces") + } + if namespaces == nil { + // UCP API not present, fall back to Kubernetes error + return nil, err + } + opts.AllNamespaces = false + return getStacksWithNamespaces(kubeCli, opts, namespaces) +} + +func getUserVisibleNamespaces(dockerCli command.Cli) ([]string, error) { + host := dockerCli.Client().DaemonHost() + endpoint, err := url.Parse(host) + if err != nil { + return nil, err + } + endpoint.Scheme = "https" + endpoint.Path = "/kubernetesNamespaces" + resp, err := 
dockerCli.Client().HTTPClient().Get(endpoint.String()) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrapf(err, "received %d status and unable to read response", resp.StatusCode) + } + switch resp.StatusCode { + case http.StatusOK: + nms := &core_v1.NamespaceList{} + if err := json.Unmarshal(body, nms); err != nil { + return nil, errors.Wrapf(err, "unmarshal failed: %s", string(body)) + } + namespaces := make([]string, len(nms.Items)) + for i, namespace := range nms.Items { + namespaces[i] = namespace.Name + } + return namespaces, nil + case http.StatusNotFound: + // UCP API not present + return nil, nil + default: + return nil, fmt.Errorf("received %d status while retrieving namespaces: %s", resp.StatusCode, string(body)) + } +} + +func getStacksWithNamespaces(kubeCli *KubeCli, opts options.List, namespaces []string) ([]*formatter.Stack, error) { + stacks := []*formatter.Stack{} + for _, namespace := range namespaces { + kubeCli.kubeNamespace = namespace + ss, err := getStacks(kubeCli, opts) + if err != nil { + return nil, err + } + stacks = append(stacks, ss...) + } + return stacks, nil +} + +func removeDuplicates(namespaces []string) []string { + found := make(map[string]bool) + results := namespaces[:0] + for _, n := range namespaces { + if !found[n] { + results = append(results, n) + found[n] = true + } + } + return results +} diff --git a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/ps.go b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/ps.go index 60fdb32e5..a4ee14f63 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/ps.go +++ b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/ps.go @@ -10,97 +10,103 @@ import ( "github.com/docker/cli/cli/command/task" "github.com/docker/docker/api/types/swarm" apiv1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/kubernetes/pkg/api" ) +var supportedPSFilters = map[string]bool{ + "name": true, + "service": true, + "node": true, +} + // RunPS is the kubernetes implementation of docker stack ps func RunPS(dockerCli *KubeCli, options options.PS) error { - namespace := options.Namespace - - // Initialize clients + filters := options.Filter.Value() + if err := filters.Validate(supportedPSFilters); err != nil { + return err + } client, err := dockerCli.composeClient() if err != nil { return err } - stacks, err := dockerCli.stacks() + stacks, err := client.Stacks(false) if err != nil { return err } - podsClient := client.Pods() - - // Fetch pods - if _, err := stacks.Get(namespace); err != nil { - return fmt.Errorf("nothing found in stack: %s", namespace) + stackName := options.Namespace + _, err = stacks.Get(stackName) + if apierrs.IsNotFound(err) { + return fmt.Errorf("nothing found in stack: %s", stackName) } - - pods, err := fetchPods(namespace, podsClient) if err != nil { return err } - + pods, err := fetchPods(stackName, client.Pods(), filters) + if err != nil { + return err + } if len(pods) == 0 { - return fmt.Errorf("nothing found in stack: %s", namespace) + return fmt.Errorf("nothing found in stack: %s", stackName) } + return printTasks(dockerCli, options, stackName, client, pods) +} +func printTasks(dockerCli command.Cli, options options.PS, namespace string, client corev1.NodesGetter, pods []apiv1.Pod) error { format := options.Format 
- if len(format) == 0 { + if format == "" { format = task.DefaultFormat(dockerCli.ConfigFile(), options.Quiet) } - nodeResolver := makeNodeResolver(options.NoResolve, client.Nodes()) tasks := make([]swarm.Task, len(pods)) for i, pod := range pods { tasks[i] = podToTask(pod) } - return print(dockerCli, namespace, tasks, pods, nodeResolver, !options.NoTrunc, options.Quiet, format) -} - -type idResolver func(name string) (string, error) - -func print(dockerCli command.Cli, namespace string, tasks []swarm.Task, pods []apiv1.Pod, nodeResolver idResolver, trunc, quiet bool, format string) error { sort.Stable(tasksBySlot(tasks)) names := map[string]string{} nodes := map[string]string{} - tasksCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewTaskFormat(format, quiet), - Trunc: trunc, + n, err := listNodes(client, options.NoResolve) + if err != nil { + return err } - for i, task := range tasks { - nodeValue, err := nodeResolver(pods[i].Spec.NodeName) + nodeValue, err := resolveNode(pods[i].Spec.NodeName, n, options.NoResolve) if err != nil { return err } - names[task.ID] = fmt.Sprintf("%s_%s", namespace, pods[i].Name) nodes[task.ID] = nodeValue } + tasksCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewTaskFormat(format, options.Quiet), + Trunc: !options.NoTrunc, + } + return formatter.TaskWrite(tasksCtx, tasks, names, nodes) } -func makeNodeResolver(noResolve bool, nodes corev1.NodeInterface) func(string) (string, error) { +func resolveNode(name string, nodes *apiv1.NodeList, noResolve bool) (string, error) { // Here we have a name and we need to resolve its identifier. To mimic swarm behavior - // we need to resolve the id when noresolve is set, otherwise we return the name. + // we need to resolve to the id when noResolve is set, otherwise we return the name. 
if noResolve { - return func(name string) (string, error) { - n, err := nodes.List(metav1.ListOptions{ - FieldSelector: fields.OneTermEqualSelector(api.ObjectNameField, name).String(), - }) - if err != nil { - return "", err + for _, node := range nodes.Items { + if node.Name == name { + return string(node.UID), nil } - if len(n.Items) != 1 { - return "", fmt.Errorf("could not find node '%s'", name) - } - return string(n.Items[0].UID), nil } + return "", fmt.Errorf("could not find node '%s'", name) + } + return name, nil +} + +func listNodes(client corev1.NodesGetter, noResolve bool) (*apiv1.NodeList, error) { + if noResolve { + return client.Nodes().List(metav1.ListOptions{}) } - return func(name string) (string, error) { return name, nil } + return nil, nil } diff --git a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/remove.go b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/remove.go index bf3654a7a..311c7597a 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/remove.go +++ b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/remove.go @@ -4,20 +4,23 @@ import ( "fmt" "github.com/docker/cli/cli/command/stack/options" + "github.com/pkg/errors" ) // RunRemove is the kubernetes implementation of docker stack remove func RunRemove(dockerCli *KubeCli, opts options.Remove) error { - stacks, err := dockerCli.stacks() + composeClient, err := dockerCli.composeClient() + if err != nil { + return err + } + stacks, err := composeClient.Stacks(false) if err != nil { return err } for _, stack := range opts.Namespaces { fmt.Fprintf(dockerCli.Out(), "Removing stack: %s\n", stack) - err := stacks.Delete(stack) - if err != nil { - fmt.Fprintf(dockerCli.Out(), "Failed to remove stack %s: %s\n", stack, err) - return err + if err := stacks.Delete(stack); err != nil { + return errors.Wrapf(err, "Failed to remove stack %s", stack) } } return nil diff --git a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/services.go b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/services.go index 1c98c4fd6..a2d13e567 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/services.go +++ b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/services.go @@ -2,46 +2,117 @@ package kubernetes import ( "fmt" + "strings" "github.com/docker/cli/cli/command/formatter" "github.com/docker/cli/cli/command/stack/options" "github.com/docker/cli/kubernetes/labels" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + appsv1beta2 "k8s.io/api/apps/v1beta2" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +var supportedServicesFilters = map[string]bool{ + "mode": true, + "name": true, + "label": true, +} + +func generateSelector(labels map[string][]string) []string { + var result []string + for k, v := range labels { + for _, val := range v { + result = append(result, fmt.Sprintf("%s=%s", k, val)) + } + if len(v) == 0 { + result = append(result, k) + } + } + return result +} + +func parseLabelFilters(rawFilters []string) map[string][]string { + labels := map[string][]string{} + for _, rawLabel := range rawFilters { + v := strings.SplitN(rawLabel, "=", 2) + key := v[0] + if len(v) > 1 { + labels[key] = append(labels[key], v[1]) + } else if _, ok := labels[key]; !ok { + labels[key] = []string{} + } + } + return labels +} + +func generateLabelSelector(f filters.Args, stackName string) string { + selectors := 
append(generateSelector(parseLabelFilters(f.Get("label"))), labels.SelectorForStack(stackName)) + return strings.Join(selectors, ",") +} + +func getResourcesForServiceList(dockerCli *KubeCli, filters filters.Args, labelSelector string) (*appsv1beta2.ReplicaSetList, *appsv1beta2.DaemonSetList, *corev1.ServiceList, error) { + client, err := dockerCli.composeClient() + if err != nil { + return nil, nil, nil, err + } + modes := filters.Get("mode") + replicas := &appsv1beta2.ReplicaSetList{} + if len(modes) == 0 || filters.ExactMatch("mode", "replicated") { + if replicas, err = client.ReplicaSets().List(metav1.ListOptions{LabelSelector: labelSelector}); err != nil { + return nil, nil, nil, err + } + } + daemons := &appsv1beta2.DaemonSetList{} + if len(modes) == 0 || filters.ExactMatch("mode", "global") { + if daemons, err = client.DaemonSets().List(metav1.ListOptions{LabelSelector: labelSelector}); err != nil { + return nil, nil, nil, err + } + } + services, err := client.Services().List(metav1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + return nil, nil, nil, err + } + return replicas, daemons, services, nil +} + // RunServices is the kubernetes implementation of docker stack services func RunServices(dockerCli *KubeCli, opts options.Services) error { - // Initialize clients + filters := opts.Filter.Value() + if err := filters.Validate(supportedServicesFilters); err != nil { + return err + } client, err := dockerCli.composeClient() if err != nil { return nil } - stacks, err := dockerCli.stacks() + stacks, err := client.Stacks(false) if err != nil { - return err - } - replicas := client.ReplicaSets() - - if _, err := stacks.Get(opts.Namespace); err != nil { - fmt.Fprintf(dockerCli.Err(), "Nothing found in stack: %s\n", opts.Namespace) return nil } - - replicasList, err := replicas.List(metav1.ListOptions{LabelSelector: labels.SelectorForStack(opts.Namespace)}) + stackName := opts.Namespace + _, err = stacks.Get(stackName) + if apierrs.IsNotFound(err) { + return fmt.Errorf("nothing found in stack: %s", stackName) + } if err != nil { return err } - servicesList, err := client.Services().List(metav1.ListOptions{LabelSelector: labels.SelectorForStack(opts.Namespace)}) + labelSelector := generateLabelSelector(filters, stackName) + replicasList, daemonsList, servicesList, err := getResourcesForServiceList(dockerCli, filters, labelSelector) if err != nil { return err } // Convert Replicas sets and kubernetes services to swam services and formatter informations - services, info, err := replicasToServices(replicasList, servicesList) + services, info, err := convertToServices(replicasList, daemonsList, servicesList) if err != nil { return err } + services = filterServicesByName(services, filters.Get("name"), stackName) if opts.Quiet { info = map[string]formatter.ServiceListInfo{} @@ -62,3 +133,26 @@ func RunServices(dockerCli *KubeCli, opts options.Services) error { } return formatter.ServiceListWrite(servicesCtx, services, info) } + +func filterServicesByName(services []swarm.Service, names []string, stackName string) []swarm.Service { + if len(names) == 0 { + return services + } + prefix := stackName + "_" + // Accepts unprefixed service name (for compatibility with existing swarm scripts where service names are prefixed by stack names) + for i, n := range names { + if !strings.HasPrefix(n, prefix) { + names[i] = stackName + "_" + n + } + } + // Filter services + result := []swarm.Service{} + for _, s := range services { + for _, n := range names { + if strings.HasPrefix(s.Spec.Name, 
n) { + result = append(result, s) + } + } + } + return result +} diff --git a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/stack.go b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/stack.go index 76cf07aa2..67d1af689 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/stack.go +++ b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/stack.go @@ -2,7 +2,7 @@ package kubernetes import ( "io/ioutil" - "path" + "path/filepath" "sort" "github.com/docker/cli/kubernetes/compose/v1beta2" @@ -12,17 +12,18 @@ import ( corev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) -// stack is the main type used by stack commands so they remain independent from kubernetes compose component version. -type stack struct { - name string - composeFile string - spec *v1beta2.StackSpec +// Stack is the main type used by stack commands so they remain independent from kubernetes compose component version. +type Stack struct { + Name string + Namespace string + ComposeFile string + Spec *v1beta2.StackSpec } // getServices returns all the stack service names, sorted lexicographically -func (s *stack) getServices() []string { - services := make([]string, len(s.spec.Services)) - for i, service := range s.spec.Services { +func (s *Stack) getServices() []string { + services := make([]string, len(s.Spec.Services)) + for i, service := range s.Spec.Services { services[i] = service.Name } sort.Strings(services) @@ -30,19 +31,19 @@ func (s *stack) getServices() []string { } // createFileBasedConfigMaps creates a Kubernetes ConfigMap for each Compose global file-based config. -func (s *stack) createFileBasedConfigMaps(configMaps corev1.ConfigMapInterface) error { - for name, config := range s.spec.Configs { +func (s *Stack) createFileBasedConfigMaps(configMaps corev1.ConfigMapInterface) error { + for name, config := range s.Spec.Configs { if config.File == "" { continue } - fileName := path.Base(config.File) + fileName := filepath.Base(config.File) content, err := ioutil.ReadFile(config.File) if err != nil { return err } - if _, err := configMaps.Create(toConfigMap(s.name, name, fileName, content)); err != nil { + if _, err := configMaps.Create(toConfigMap(s.Name, name, fileName, content)); err != nil { return err } } @@ -70,19 +71,19 @@ func toConfigMap(stackName, name, key string, content []byte) *apiv1.ConfigMap { } // createFileBasedSecrets creates a Kubernetes Secret for each Compose global file-based secret. 
-func (s *stack) createFileBasedSecrets(secrets corev1.SecretInterface) error { - for name, secret := range s.spec.Secrets { +func (s *Stack) createFileBasedSecrets(secrets corev1.SecretInterface) error { + for name, secret := range s.Spec.Secrets { if secret.File == "" { continue } - fileName := path.Base(secret.File) + fileName := filepath.Base(secret.File) content, err := ioutil.ReadFile(secret.File) if err != nil { return err } - if _, err := secrets.Create(toSecret(s.name, name, fileName, content)); err != nil { + if _, err := secrets.Create(toSecret(s.Name, name, fileName, content)); err != nil { return err } } diff --git a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/stackclient.go b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/stackclient.go index 38424d8be..e769e7466 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/stackclient.go +++ b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/stackclient.go @@ -3,32 +3,31 @@ package kubernetes import ( "fmt" - composetypes "github.com/docker/cli/cli/compose/types" composev1beta1 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1" composev1beta2 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2" "github.com/docker/cli/kubernetes/labels" - yaml "gopkg.in/yaml.v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" ) -// stackClient talks to a kubernetes compose component. -type stackClient interface { - CreateOrUpdate(s stack) error +// StackClient talks to a kubernetes compose component. +type StackClient interface { + StackConverter + CreateOrUpdate(s Stack) error Delete(name string) error - Get(name string) (stack, error) - List(opts metav1.ListOptions) ([]stack, error) - IsColliding(servicesClient corev1.ServiceInterface, s stack) error - FromCompose(name string, cfg composetypes.Config) (stack, error) + Get(name string) (Stack, error) + List(opts metav1.ListOptions) ([]Stack, error) + IsColliding(servicesClient corev1.ServiceInterface, s Stack) error } // stackV1Beta1 implements stackClient interface and talks to compose component v1beta1. 
type stackV1Beta1 struct { + stackV1Beta1Converter stacks composev1beta1.StackInterface } -func newStackV1Beta1(config *rest.Config, namespace string) (stackClient, error) { +func newStackV1Beta1(config *rest.Config, namespace string) (*stackV1Beta1, error) { client, err := composev1beta1.NewForConfig(config) if err != nil { return nil, err @@ -36,10 +35,10 @@ func newStackV1Beta1(config *rest.Config, namespace string) (stackClient, error) return &stackV1Beta1{stacks: client.Stacks(namespace)}, nil } -func (s *stackV1Beta1) CreateOrUpdate(internalStack stack) error { +func (s *stackV1Beta1) CreateOrUpdate(internalStack Stack) error { // If it already exists, update the stack - if stackBeta1, err := s.stacks.Get(internalStack.name, metav1.GetOptions{}); err == nil { - stackBeta1.Spec.ComposeFile = internalStack.composeFile + if stackBeta1, err := s.stacks.Get(internalStack.Name, metav1.GetOptions{}); err == nil { + stackBeta1.Spec.ComposeFile = internalStack.ComposeFile _, err := s.stacks.Update(stackBeta1) return err } @@ -52,20 +51,20 @@ func (s *stackV1Beta1) Delete(name string) error { return s.stacks.Delete(name, &metav1.DeleteOptions{}) } -func (s *stackV1Beta1) Get(name string) (stack, error) { +func (s *stackV1Beta1) Get(name string) (Stack, error) { stackBeta1, err := s.stacks.Get(name, metav1.GetOptions{}) if err != nil { - return stack{}, err + return Stack{}, err } return stackFromV1beta1(stackBeta1) } -func (s *stackV1Beta1) List(opts metav1.ListOptions) ([]stack, error) { +func (s *stackV1Beta1) List(opts metav1.ListOptions) ([]Stack, error) { list, err := s.stacks.List(opts) if err != nil { return nil, err } - stacks := make([]stack, len(list.Items)) + stacks := make([]Stack, len(list.Items)) for i := range list.Items { stack, err := stackFromV1beta1(&list.Items[i]) if err != nil { @@ -77,9 +76,9 @@ func (s *stackV1Beta1) List(opts metav1.ListOptions) ([]stack, error) { } // IsColliding verifies that services defined in the stack collides with already deployed services -func (s *stackV1Beta1) IsColliding(servicesClient corev1.ServiceInterface, st stack) error { +func (s *stackV1Beta1) IsColliding(servicesClient corev1.ServiceInterface, st Stack) error { for _, srv := range st.getServices() { - if err := verify(servicesClient, st.name, srv); err != nil { + if err := verify(servicesClient, st.Name, srv); err != nil { return err } } @@ -103,24 +102,13 @@ func verify(services corev1.ServiceInterface, stackName string, service string) return nil } -func (s *stackV1Beta1) FromCompose(name string, cfg composetypes.Config) (stack, error) { - res, err := yaml.Marshal(cfg) - if err != nil { - return stack{}, err - } - return stack{ - name: name, - composeFile: string(res), - spec: FromComposeConfig(&cfg), - }, nil -} - // stackV1Beta2 implements stackClient interface and talks to compose component v1beta2. 
type stackV1Beta2 struct { + stackV1Beta2Converter stacks composev1beta2.StackInterface } -func newStackV1Beta2(config *rest.Config, namespace string) (stackClient, error) { +func newStackV1Beta2(config *rest.Config, namespace string) (*stackV1Beta2, error) { client, err := composev1beta2.NewForConfig(config) if err != nil { return nil, err @@ -128,10 +116,10 @@ func newStackV1Beta2(config *rest.Config, namespace string) (stackClient, error) return &stackV1Beta2{stacks: client.Stacks(namespace)}, nil } -func (s *stackV1Beta2) CreateOrUpdate(internalStack stack) error { +func (s *stackV1Beta2) CreateOrUpdate(internalStack Stack) error { // If it already exists, update the stack - if stackBeta2, err := s.stacks.Get(internalStack.name, metav1.GetOptions{}); err == nil { - stackBeta2.Spec = internalStack.spec + if stackBeta2, err := s.stacks.Get(internalStack.Name, metav1.GetOptions{}); err == nil { + stackBeta2.Spec = internalStack.Spec _, err := s.stacks.Update(stackBeta2) return err } @@ -144,20 +132,20 @@ func (s *stackV1Beta2) Delete(name string) error { return s.stacks.Delete(name, &metav1.DeleteOptions{}) } -func (s *stackV1Beta2) Get(name string) (stack, error) { +func (s *stackV1Beta2) Get(name string) (Stack, error) { stackBeta2, err := s.stacks.Get(name, metav1.GetOptions{}) if err != nil { - return stack{}, err + return Stack{}, err } return stackFromV1beta2(stackBeta2), nil } -func (s *stackV1Beta2) List(opts metav1.ListOptions) ([]stack, error) { +func (s *stackV1Beta2) List(opts metav1.ListOptions) ([]Stack, error) { list, err := s.stacks.List(opts) if err != nil { return nil, err } - stacks := make([]stack, len(list.Items)) + stacks := make([]Stack, len(list.Items)) for i := range list.Items { stacks[i] = stackFromV1beta2(&list.Items[i]) } @@ -165,13 +153,6 @@ func (s *stackV1Beta2) List(opts metav1.ListOptions) ([]stack, error) { } // IsColliding is handle server side with the compose api v1beta2, so nothing to do here -func (s *stackV1Beta2) IsColliding(servicesClient corev1.ServiceInterface, st stack) error { +func (s *stackV1Beta2) IsColliding(servicesClient corev1.ServiceInterface, st Stack) error { return nil } - -func (s *stackV1Beta2) FromCompose(name string, cfg composetypes.Config) (stack, error) { - return stack{ - name: name, - spec: FromComposeConfig(&cfg), - }, nil -} diff --git a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/warnings.go b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/warnings.go new file mode 100644 index 000000000..eb4598db4 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/warnings.go @@ -0,0 +1,145 @@ +package kubernetes + +import ( + "fmt" + "io" + + composetypes "github.com/docker/cli/cli/compose/types" +) + +func warnUnsupportedFeatures(stderr io.Writer, cfg *composetypes.Config) { + warnForGlobalNetworks(stderr, cfg) + for _, s := range cfg.Services { + warnForServiceNetworks(stderr, s) + warnForUnsupportedDeploymentStrategy(stderr, s) + warnForUnsupportedRestartPolicy(stderr, s) + warnForDeprecatedProperties(stderr, s) + warnForUnsupportedProperties(stderr, s) + } +} + +func warnForGlobalNetworks(stderr io.Writer, config *composetypes.Config) { + for network := range config.Networks { + fmt.Fprintf(stderr, "top-level network %q is ignored\n", network) + } +} + +func warnServicef(stderr io.Writer, service, format string, args ...interface{}) { + fmt.Fprintf(stderr, "service \"%s\": %s\n", service, fmt.Sprintf(format, args...)) +} + +func warnForServiceNetworks(stderr io.Writer, s 
composetypes.ServiceConfig) { + for network := range s.Networks { + warnServicef(stderr, s.Name, "network %q is ignored", network) + } +} + +func warnForDeprecatedProperties(stderr io.Writer, s composetypes.ServiceConfig) { + if s.ContainerName != "" { + warnServicef(stderr, s.Name, "container_name is deprecated") + } + if len(s.Expose) > 0 { + warnServicef(stderr, s.Name, "expose is deprecated") + } +} + +func warnForUnsupportedDeploymentStrategy(stderr io.Writer, s composetypes.ServiceConfig) { + config := s.Deploy.UpdateConfig + if config == nil { + return + } + if config.Delay != 0 { + warnServicef(stderr, s.Name, "update_config.delay is not supported") + } + if config.FailureAction != "" { + warnServicef(stderr, s.Name, "update_config.failure_action is not supported") + } + if config.Monitor != 0 { + warnServicef(stderr, s.Name, "update_config.monitor is not supported") + } + if config.MaxFailureRatio != 0 { + warnServicef(stderr, s.Name, "update_config.max_failure_ratio is not supported") + } +} + +func warnForUnsupportedRestartPolicy(stderr io.Writer, s composetypes.ServiceConfig) { + policy := s.Deploy.RestartPolicy + if policy == nil { + return + } + + if policy.Delay != nil { + warnServicef(stderr, s.Name, "restart_policy.delay is ignored") + } + if policy.MaxAttempts != nil { + warnServicef(stderr, s.Name, "restart_policy.max_attempts is ignored") + } + if policy.Window != nil { + warnServicef(stderr, s.Name, "restart_policy.window is ignored") + } +} + +func warnForUnsupportedProperties(stderr io.Writer, s composetypes.ServiceConfig) { // nolint: gocyclo + if build := s.Build; build.Context != "" || build.Dockerfile != "" || len(build.Args) > 0 || len(build.Labels) > 0 || len(build.CacheFrom) > 0 || build.Network != "" || build.Target != "" { + warnServicef(stderr, s.Name, "build is ignored") + } + if s.CgroupParent != "" { + warnServicef(stderr, s.Name, "cgroup_parent is ignored") + } + if len(s.Devices) > 0 { + warnServicef(stderr, s.Name, "devices are ignored") + } + if s.DomainName != "" { + warnServicef(stderr, s.Name, "domainname is ignored") + } + if len(s.ExternalLinks) > 0 { + warnServicef(stderr, s.Name, "external_links are ignored") + } + if len(s.Links) > 0 { + warnServicef(stderr, s.Name, "links are ignored") + } + if s.MacAddress != "" { + warnServicef(stderr, s.Name, "mac_address is ignored") + } + if s.NetworkMode != "" { + warnServicef(stderr, s.Name, "network_mode is ignored") + } + if s.Restart != "" { + warnServicef(stderr, s.Name, "restart is ignored") + } + if len(s.SecurityOpt) > 0 { + warnServicef(stderr, s.Name, "security_opt are ignored") + } + if len(s.Ulimits) > 0 { + warnServicef(stderr, s.Name, "ulimits are ignored") + } + if len(s.DependsOn) > 0 { + warnServicef(stderr, s.Name, "depends_on are ignored") + } + if s.CredentialSpec.File != "" { + warnServicef(stderr, s.Name, "credential_spec is ignored") + } + if len(s.DNS) > 0 { + warnServicef(stderr, s.Name, "dns are ignored") + } + if len(s.DNSSearch) > 0 { + warnServicef(stderr, s.Name, "dns_search are ignored") + } + if len(s.EnvFile) > 0 { + warnServicef(stderr, s.Name, "env_file are ignored") + } + if s.StopSignal != "" { + warnServicef(stderr, s.Name, "stop_signal is ignored") + } + if s.Logging != nil { + warnServicef(stderr, s.Name, "logging is ignored") + } + for _, m := range s.Volumes { + if m.Volume != nil && m.Volume.NoCopy { + warnServicef(stderr, s.Name, "volume.nocopy is ignored") + } + if m.Bind != nil && m.Bind.Propagation != "" { + warnServicef(stderr, s.Name, "volume.propagation 
is ignored") + } + } +} diff --git a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/watcher.go b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/watcher.go index cfe28800b..93d3358b4 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/kubernetes/watcher.go +++ b/vendor/github.com/docker/cli/cli/command/stack/kubernetes/watcher.go @@ -1,116 +1,255 @@ package kubernetes import ( - "fmt" + "context" + "sync" "time" + apiv1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1" "github.com/docker/cli/kubernetes/labels" + "github.com/pkg/errors" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/apimachinery/pkg/runtime" + runtimeutil "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + podutils "k8s.io/kubernetes/pkg/api/v1/pod" ) -// DeployWatcher watches a stack deployement -type DeployWatcher struct { - Pods corev1.PodInterface +type stackListWatch interface { + List(opts metav1.ListOptions) (*apiv1beta1.StackList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) } -// Watch watches a stuck deployement and return a chan that will holds the state of the stack -func (w DeployWatcher) Watch(name string, serviceNames []string) chan bool { - stop := make(chan bool) - - go w.waitForPods(name, serviceNames, stop) +type podListWatch interface { + List(opts metav1.ListOptions) (*apiv1.PodList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) +} - return stop +// DeployWatcher watches a stack deployement +type deployWatcher struct { + pods podListWatch + stacks stackListWatch } -func (w DeployWatcher) waitForPods(stackName string, serviceNames []string, stop chan bool) { - starts := map[string]int32{} +// Watch watches a stuck deployement and return a chan that will holds the state of the stack +func (w *deployWatcher) Watch(name string, serviceNames []string, statusUpdates chan serviceStatus) error { + errC := make(chan error, 1) + defer close(errC) + + handlers := runtimeutil.ErrorHandlers + + // informer errors are reported using global error handlers + runtimeutil.ErrorHandlers = append(handlers, func(err error) { + errC <- err + }) + defer func() { + runtimeutil.ErrorHandlers = handlers + }() + + ctx, cancel := context.WithCancel(context.Background()) + wg := sync.WaitGroup{} + defer func() { + cancel() + wg.Wait() + }() + wg.Add(2) + go func() { + defer wg.Done() + w.watchStackStatus(ctx, name, errC) + }() + go func() { + defer wg.Done() + w.waitForPods(ctx, name, serviceNames, errC, statusUpdates) + }() + + return <-errC +} - for { - time.Sleep(1 * time.Second) +type stackWatcher struct { + resultChan chan error + stackName string +} - list, err := w.Pods.List(metav1.ListOptions{ - LabelSelector: labels.SelectorForStack(stackName), - IncludeUninitialized: true, - }) - if err != nil { - stop <- true - return - } +var _ cache.ResourceEventHandler = &stackWatcher{} - for i := range list.Items { - pod := list.Items[i] - if pod.Status.Phase != apiv1.PodRunning { - continue - } +func (sw *stackWatcher) OnAdd(obj interface{}) { + stack, ok := obj.(*apiv1beta1.Stack) + switch { + case !ok: + sw.resultChan <- errors.Errorf("stack %s has incorrect type", sw.stackName) + case stack.Status.Phase == apiv1beta1.StackFailure: + sw.resultChan <- errors.Errorf("stack %s failed with status %s: %s", sw.stackName, stack.Status.Phase, stack.Status.Message) + } +} - startCount := startCount(pod) - serviceName 
:= pod.Labels[labels.ForServiceName] - if startCount != starts[serviceName] { - if startCount == 1 { - fmt.Printf(" - Service %s has one container running\n", serviceName) - } else { - fmt.Printf(" - Service %s was restarted %d %s\n", serviceName, startCount-1, timeTimes(startCount-1)) - } +func (sw *stackWatcher) OnUpdate(oldObj, newObj interface{}) { + sw.OnAdd(newObj) +} - starts[serviceName] = startCount - } - } +func (sw *stackWatcher) OnDelete(obj interface{}) { +} - if allReady(list.Items, serviceNames) { - stop <- true - return - } +func (w *deployWatcher) watchStackStatus(ctx context.Context, stackname string, e chan error) { + informer := newStackInformer(w.stacks, stackname) + sw := &stackWatcher{ + resultChan: e, } + informer.AddEventHandler(sw) + informer.Run(ctx.Done()) } -func startCount(pod apiv1.Pod) int32 { - restart := int32(0) - - for _, status := range pod.Status.ContainerStatuses { - restart += status.RestartCount - } +type serviceStatus struct { + name string + podsPending int + podsRunning int + podsSucceeded int + podsFailed int + podsUnknown int + podsReady int + podsTotal int +} - return 1 + restart +type podWatcher struct { + stackName string + services map[string]serviceStatus + resultChan chan error + starts map[string]int32 + indexer cache.Indexer + statusUpdates chan serviceStatus } -func allReady(pods []apiv1.Pod, serviceNames []string) bool { - serviceUp := map[string]bool{} +var _ cache.ResourceEventHandler = &podWatcher{} - for _, pod := range pods { - if time.Since(pod.GetCreationTimestamp().Time) < 10*time.Second { - return false +func (pw *podWatcher) handlePod(obj interface{}) { + pod, ok := obj.(*apiv1.Pod) + if !ok { + pw.resultChan <- errors.Errorf("Pod has incorrect type in stack %s", pw.stackName) + return + } + serviceName := pod.Labels[labels.ForServiceName] + pw.updateServiceStatus(serviceName) + if pw.allReady() { + select { + case pw.resultChan <- nil: + default: + // result has already been reported, just don't block } + } +} - ready := false - for _, cond := range pod.Status.Conditions { - if cond.Type == apiv1.PodReady && cond.Status == apiv1.ConditionTrue { - ready = true +func (pw *podWatcher) updateServiceStatus(serviceName string) { + pods, _ := pw.indexer.ByIndex("byservice", serviceName) + status := serviceStatus{name: serviceName} + for _, obj := range pods { + if pod, ok := obj.(*apiv1.Pod); ok { + switch pod.Status.Phase { + case apiv1.PodPending: + status.podsPending++ + case apiv1.PodRunning: + status.podsRunning++ + case apiv1.PodSucceeded: + status.podsSucceeded++ + case apiv1.PodFailed: + status.podsFailed++ + case apiv1.PodUnknown: + status.podsUnknown++ + } + if podutils.IsPodReady(pod) { + status.podsReady++ } } - - if !ready { - return false - } - - serviceName := pod.Labels[labels.ForServiceName] - serviceUp[serviceName] = true } + status.podsTotal = len(pods) + oldStatus := pw.services[serviceName] + if oldStatus != status { + pw.statusUpdates <- status + } + pw.services[serviceName] = status +} - for _, serviceName := range serviceNames { - if !serviceUp[serviceName] { +func (pw *podWatcher) allReady() bool { + for _, status := range pw.services { + if status.podsReady == 0 { return false } } - return true } -func timeTimes(n int32) string { - if n == 1 { - return "time" +func (pw *podWatcher) OnAdd(obj interface{}) { + pw.handlePod(obj) +} + +func (pw *podWatcher) OnUpdate(oldObj, newObj interface{}) { + pw.handlePod(newObj) +} + +func (pw *podWatcher) OnDelete(obj interface{}) { + pw.handlePod(obj) +} + +func (w 
*deployWatcher) waitForPods(ctx context.Context, stackName string, serviceNames []string, e chan error, statusUpdates chan serviceStatus) { + informer := newPodInformer(w.pods, stackName, cache.Indexers{ + "byservice": func(obj interface{}) ([]string, error) { + pod, ok := obj.(*apiv1.Pod) + if !ok { + return nil, errors.Errorf("Pod has incorrect type in stack %s", stackName) + } + return []string{pod.Labels[labels.ForServiceName]}, nil + }}) + services := map[string]serviceStatus{} + for _, name := range serviceNames { + services[name] = serviceStatus{name: name} + } + pw := &podWatcher{ + stackName: stackName, + services: services, + resultChan: e, + starts: map[string]int32{}, + indexer: informer.GetIndexer(), + statusUpdates: statusUpdates, } + informer.AddEventHandler(pw) + informer.Run(ctx.Done()) +} + +func newPodInformer(podsClient podListWatch, stackName string, indexers cache.Indexers) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.LabelSelector = labels.SelectorForStack(stackName) + options.IncludeUninitialized = true + return podsClient.List(options) + }, + + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.LabelSelector = labels.SelectorForStack(stackName) + options.IncludeUninitialized = true + return podsClient.Watch(options) + }, + }, + &apiv1.Pod{}, + time.Second*5, + indexers, + ) +} - return "times" +func newStackInformer(stacksClient stackListWatch, stackName string) cache.SharedInformer { + return cache.NewSharedInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.LabelSelector = labels.SelectorForStack(stackName) + return stacksClient.List(options) + }, + + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.LabelSelector = labels.SelectorForStack(stackName) + return stacksClient.Watch(options) + }, + }, + &apiv1beta1.Stack{}, + time.Second*5, + ) } diff --git a/vendor/github.com/docker/cli/cli/command/stack/list.go b/vendor/github.com/docker/cli/cli/command/stack/list.go new file mode 100644 index 000000000..a0aa3d1cf --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/stack/list.go @@ -0,0 +1,79 @@ +package stack + +import ( + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +func newListCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + opts := options.List{} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List stacks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(cmd, dockerCli, opts, common.orchestrator) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.Format, "format", "", "Pretty-print stacks using a Go template") + flags.StringSliceVar(&opts.Namespaces, "namespace", []string{}, "Kubernetes namespaces to use") + flags.SetAnnotation("namespace", "kubernetes", nil) + flags.BoolVarP(&opts.AllNamespaces, "all-namespaces", "", false, "List stacks from all Kubernetes namespaces") + flags.SetAnnotation("all-namespaces", "kubernetes", nil) + return cmd +} + +func runList(cmd *cobra.Command, dockerCli command.Cli, opts 
options.List, orchestrator command.Orchestrator) error { + stacks := []*formatter.Stack{} + if orchestrator.HasSwarm() { + ss, err := swarm.GetStacks(dockerCli) + if err != nil { + return err + } + stacks = append(stacks, ss...) + } + if orchestrator.HasKubernetes() { + kubeCli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags(), orchestrator)) + if err != nil { + return err + } + ss, err := kubernetes.GetStacks(kubeCli, opts) + if err != nil { + return err + } + stacks = append(stacks, ss...) + } + return format(dockerCli, opts, orchestrator, stacks) +} + +func format(dockerCli command.Cli, opts options.List, orchestrator command.Orchestrator, stacks []*formatter.Stack) error { + format := opts.Format + if format == "" || format == formatter.TableFormatKey { + format = formatter.SwarmStackTableFormat + if orchestrator.HasKubernetes() { + format = formatter.KubernetesStackTableFormat + } + } + stackCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.Format(format), + } + sort.Slice(stacks, func(i, j int) bool { + return sortorder.NaturalLess(stacks[i].Name, stacks[j].Name) || + !sortorder.NaturalLess(stacks[j].Name, stacks[i].Name) && + sortorder.NaturalLess(stacks[j].Namespace, stacks[i].Namespace) + }) + return formatter.StackWrite(stackCtx, stacks) +} diff --git a/vendor/github.com/docker/cli/cli/command/stack/options/opts.go b/vendor/github.com/docker/cli/cli/command/stack/options/opts.go index 8c8a1acf2..afcecd996 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/options/opts.go +++ b/vendor/github.com/docker/cli/cli/command/stack/options/opts.go @@ -14,7 +14,9 @@ type Deploy struct { // List holds docker stack ls options type List struct { - Format string + Format string + AllNamespaces bool + Namespaces []string } // PS holds docker stack ps options diff --git a/vendor/github.com/docker/cli/cli/command/stack/ps.go b/vendor/github.com/docker/cli/cli/command/stack/ps.go new file mode 100644 index 000000000..4d6215351 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/stack/ps.go @@ -0,0 +1,48 @@ +package stack + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + cliopts "github.com/docker/cli/opts" + "github.com/spf13/cobra" +) + +func newPsCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + opts := options.PS{Filter: cliopts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] STACK", + Short: "List the tasks in the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Namespace = args[0] + if err := validateStackName(opts.Namespace); err != nil { + return err + } + + switch { + case common.orchestrator.HasAll(): + return errUnsupportedAllOrchestrator + case common.orchestrator.HasKubernetes(): + kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags(), common.orchestrator)) + if err != nil { + return err + } + return kubernetes.RunPS(kli, opts) + default: + return swarm.RunPS(dockerCli, opts) + } + }, + } + flags := cmd.Flags() + flags.BoolVar(&opts.NoTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.NoResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&opts.Filter, "filter", "f", "Filter output based on conditions provided") + flags.BoolVarP(&opts.Quiet, "quiet", "q", false, "Only display task IDs") + 
flags.StringVar(&opts.Format, "format", "", "Pretty-print tasks using a Go template") + kubernetes.AddNamespaceFlag(flags) + return cmd +} diff --git a/vendor/github.com/docker/cli/cli/command/stack/remove.go b/vendor/github.com/docker/cli/cli/command/stack/remove.go new file mode 100644 index 000000000..737713e48 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/stack/remove.go @@ -0,0 +1,43 @@ +package stack + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + "github.com/spf13/cobra" +) + +func newRemoveCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + var opts options.Remove + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] STACK [STACK...]", + Aliases: []string{"remove", "down"}, + Short: "Remove one or more stacks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Namespaces = args + if err := validateStackNames(opts.Namespaces); err != nil { + return err + } + + switch { + case common.orchestrator.HasAll(): + return errUnsupportedAllOrchestrator + case common.orchestrator.HasKubernetes(): + kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags(), common.orchestrator)) + if err != nil { + return err + } + return kubernetes.RunRemove(kli, opts) + default: + return swarm.RunRemove(dockerCli, opts) + } + }, + } + flags := cmd.Flags() + kubernetes.AddNamespaceFlag(flags) + return cmd +} diff --git a/vendor/github.com/docker/cli/cli/command/stack/services.go b/vendor/github.com/docker/cli/cli/command/stack/services.go new file mode 100644 index 000000000..ca6d42c23 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/stack/services.go @@ -0,0 +1,46 @@ +package stack + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + cliopts "github.com/docker/cli/opts" + "github.com/spf13/cobra" +) + +func newServicesCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + opts := options.Services{Filter: cliopts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "services [OPTIONS] STACK", + Short: "List the services in the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Namespace = args[0] + if err := validateStackName(opts.Namespace); err != nil { + return err + } + + switch { + case common.orchestrator.HasAll(): + return errUnsupportedAllOrchestrator + case common.orchestrator.HasKubernetes(): + kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags(), common.orchestrator)) + if err != nil { + return err + } + return kubernetes.RunServices(kli, opts) + default: + return swarm.RunServices(dockerCli, opts) + } + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.Quiet, "quiet", "q", false, "Only display IDs") + flags.StringVar(&opts.Format, "format", "", "Pretty-print services using a Go template") + flags.VarP(&opts.Filter, "filter", "f", "Filter output based on conditions provided") + kubernetes.AddNamespaceFlag(flags) + return cmd +} diff --git a/vendor/github.com/docker/cli/cli/command/stack/swarm/common.go b/vendor/github.com/docker/cli/cli/command/stack/swarm/common.go index f599b7096..b4193df36 100644 --- 
a/vendor/github.com/docker/cli/cli/command/stack/swarm/common.go +++ b/vendor/github.com/docker/cli/cli/command/stack/swarm/common.go @@ -1,13 +1,14 @@ package swarm import ( + "context" + "github.com/docker/cli/cli/compose/convert" "github.com/docker/cli/opts" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/client" - "golang.org/x/net/context" ) func getStackFilter(namespace string) filters.Args { @@ -16,7 +17,7 @@ func getStackFilter(namespace string) filters.Args { return filter } -func getServiceFilter(namespace string) filters.Args { +func getStackServiceFilter(namespace string) filters.Args { return getStackFilter(namespace) } @@ -32,42 +33,18 @@ func getAllStacksFilter() filters.Args { return filter } -func getServices( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]swarm.Service, error) { - return apiclient.ServiceList( - ctx, - types.ServiceListOptions{Filters: getServiceFilter(namespace)}) +func getStackServices(ctx context.Context, apiclient client.APIClient, namespace string) ([]swarm.Service, error) { + return apiclient.ServiceList(ctx, types.ServiceListOptions{Filters: getStackServiceFilter(namespace)}) } -func getStackNetworks( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]types.NetworkResource, error) { - return apiclient.NetworkList( - ctx, - types.NetworkListOptions{Filters: getStackFilter(namespace)}) +func getStackNetworks(ctx context.Context, apiclient client.APIClient, namespace string) ([]types.NetworkResource, error) { + return apiclient.NetworkList(ctx, types.NetworkListOptions{Filters: getStackFilter(namespace)}) } -func getStackSecrets( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]swarm.Secret, error) { - return apiclient.SecretList( - ctx, - types.SecretListOptions{Filters: getStackFilter(namespace)}) +func getStackSecrets(ctx context.Context, apiclient client.APIClient, namespace string) ([]swarm.Secret, error) { + return apiclient.SecretList(ctx, types.SecretListOptions{Filters: getStackFilter(namespace)}) } -func getStackConfigs( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]swarm.Config, error) { - return apiclient.ConfigList( - ctx, - types.ConfigListOptions{Filters: getStackFilter(namespace)}) +func getStackConfigs(ctx context.Context, apiclient client.APIClient, namespace string) ([]swarm.Config, error) { + return apiclient.ConfigList(ctx, types.ConfigListOptions{Filters: getStackFilter(namespace)}) } diff --git a/vendor/github.com/docker/cli/cli/command/stack/swarm/deploy.go b/vendor/github.com/docker/cli/cli/command/stack/swarm/deploy.go index a308e2ee6..d11c328f3 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/swarm/deploy.go +++ b/vendor/github.com/docker/cli/cli/command/stack/swarm/deploy.go @@ -1,15 +1,16 @@ package swarm import ( + "context" "fmt" "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/stack/options" "github.com/docker/cli/cli/compose/convert" + composetypes "github.com/docker/cli/cli/compose/types" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/pkg/errors" - "golang.org/x/net/context" ) // Resolve image constants @@ -21,23 +22,14 @@ const ( ) // RunDeploy is the swarm implementation of docker stack deploy -func RunDeploy(dockerCli command.Cli, opts options.Deploy) error { +func RunDeploy(dockerCli command.Cli, opts 
options.Deploy, cfg *composetypes.Config) error { ctx := context.Background() if err := validateResolveImageFlag(dockerCli, &opts); err != nil { return err } - switch { - case opts.Bundlefile == "" && len(opts.Composefiles) == 0: - return errors.Errorf("Please specify either a bundle file (with --bundle-file) or a Compose file (with --compose-file).") - case opts.Bundlefile != "" && len(opts.Composefiles) != 0: - return errors.Errorf("You cannot specify both a bundle file and a Compose file.") - case opts.Bundlefile != "": - return deployBundle(ctx, dockerCli, opts) - default: - return deployCompose(ctx, dockerCli, opts) - } + return deployCompose(ctx, dockerCli, opts, cfg) } // validateResolveImageFlag validates the opts.resolveImage command line option @@ -73,9 +65,9 @@ func checkDaemonIsSwarmManager(ctx context.Context, dockerCli command.Cli) error func pruneServices(ctx context.Context, dockerCli command.Cli, namespace convert.Namespace, services map[string]struct{}) { client := dockerCli.Client() - oldServices, err := getServices(ctx, client, namespace.Name()) + oldServices, err := getStackServices(ctx, client, namespace.Name()) if err != nil { - fmt.Fprintf(dockerCli.Err(), "Failed to list services: %s", err) + fmt.Fprintf(dockerCli.Err(), "Failed to list services: %s\n", err) } pruneServices := []swarm.Service{} diff --git a/vendor/github.com/docker/cli/cli/command/stack/swarm/deploy_bundlefile.go b/vendor/github.com/docker/cli/cli/command/stack/swarm/deploy_bundlefile.go index c730f3a23..8db6f66b0 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/swarm/deploy_bundlefile.go +++ b/vendor/github.com/docker/cli/cli/command/stack/swarm/deploy_bundlefile.go @@ -1,12 +1,11 @@ package swarm import ( + "context" "fmt" "io" "os" - "golang.org/x/net/context" - "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/bundlefile" "github.com/docker/cli/cli/command/stack/options" @@ -16,7 +15,8 @@ import ( "github.com/pkg/errors" ) -func deployBundle(ctx context.Context, dockerCli command.Cli, opts options.Deploy) error { +// DeployBundle deploy a bundlefile (dab) on a swarm. 
+func DeployBundle(ctx context.Context, dockerCli command.Cli, opts options.Deploy) error { bundle, err := loadBundlefile(dockerCli.Err(), opts.Namespace, opts.Bundlefile) if err != nil { return err diff --git a/vendor/github.com/docker/cli/cli/command/stack/swarm/deploy_composefile.go b/vendor/github.com/docker/cli/cli/command/stack/swarm/deploy_composefile.go index 621977404..e4574fd98 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/swarm/deploy_composefile.go +++ b/vendor/github.com/docker/cli/cli/command/stack/swarm/deploy_composefile.go @@ -1,10 +1,10 @@ package swarm import ( + "context" "fmt" "github.com/docker/cli/cli/command" - "github.com/docker/cli/cli/command/stack/loader" "github.com/docker/cli/cli/command/stack/options" "github.com/docker/cli/cli/compose/convert" composetypes "github.com/docker/cli/cli/compose/types" @@ -14,19 +14,9 @@ import ( apiclient "github.com/docker/docker/client" dockerclient "github.com/docker/docker/client" "github.com/pkg/errors" - "golang.org/x/net/context" ) -func deployCompose(ctx context.Context, dockerCli command.Cli, opts options.Deploy) error { - config, err := loader.LoadComposefile(dockerCli, opts) - if err != nil { - return err - } - return DeployCompose(ctx, dockerCli, config, opts) -} - -// DeployCompose deploys the given parsed compose file -func DeployCompose(ctx context.Context, dockerCli command.Cli, config *composetypes.Config, opts options.Deploy) error { +func deployCompose(ctx context.Context, dockerCli command.Cli, opts options.Deploy, config *composetypes.Config) error { if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { return err } @@ -213,7 +203,7 @@ func deployServices( apiClient := dockerCli.Client() out := dockerCli.Out() - existingServices, err := getServices(ctx, apiClient, namespace.Name()) + existingServices, err := getStackServices(ctx, apiClient, namespace.Name()) if err != nil { return err } @@ -252,6 +242,12 @@ func deployServices( // service update. serviceSpec.TaskTemplate.ContainerSpec.Image = service.Spec.TaskTemplate.ContainerSpec.Image } + + // Stack deploy does not have a `--force` option. Preserve existing ForceUpdate + // value so that tasks are not re-deployed if not updated. + // TODO move this to API client? 
+ serviceSpec.TaskTemplate.ForceUpdate = service.Spec.TaskTemplate.ForceUpdate + response, err := apiClient.ServiceUpdate( ctx, service.ID, diff --git a/vendor/github.com/docker/cli/cli/command/stack/swarm/list.go b/vendor/github.com/docker/cli/cli/command/stack/swarm/list.go index 3bcdab723..c0c19d0a5 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/swarm/list.go +++ b/vendor/github.com/docker/cli/cli/command/stack/swarm/list.go @@ -1,49 +1,19 @@ package swarm import ( - "sort" + "context" "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/formatter" - "github.com/docker/cli/cli/command/stack/options" "github.com/docker/cli/cli/compose/convert" "github.com/docker/docker/api/types" - "github.com/docker/docker/client" "github.com/pkg/errors" - "golang.org/x/net/context" - "vbom.ml/util/sortorder" ) -// RunList is the swarm implementation of docker stack ls -func RunList(dockerCli command.Cli, opts options.List) error { - client := dockerCli.Client() - ctx := context.Background() - - stacks, err := getStacks(ctx, client) - if err != nil { - return err - } - format := opts.Format - if len(format) == 0 { - format = formatter.TableFormatKey - } - stackCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewStackFormat(format), - } - sort.Sort(byName(stacks)) - return formatter.StackWrite(stackCtx, stacks) -} - -type byName []*formatter.Stack - -func (n byName) Len() int { return len(n) } -func (n byName) Swap(i, j int) { n[i], n[j] = n[j], n[i] } -func (n byName) Less(i, j int) bool { return sortorder.NaturalLess(n[i].Name, n[j].Name) } - -func getStacks(ctx context.Context, apiclient client.APIClient) ([]*formatter.Stack, error) { - services, err := apiclient.ServiceList( - ctx, +// GetStacks lists the swarm stacks. 
+func GetStacks(dockerCli command.Cli) ([]*formatter.Stack, error) { + services, err := dockerCli.Client().ServiceList( + context.Background(), types.ServiceListOptions{Filters: getAllStacksFilter()}) if err != nil { return nil, err @@ -59,8 +29,9 @@ func getStacks(ctx context.Context, apiclient client.APIClient) ([]*formatter.St ztack, ok := m[name] if !ok { m[name] = &formatter.Stack{ - Name: name, - Services: 1, + Name: name, + Services: 1, + Orchestrator: "Swarm", } } else { ztack.Services++ diff --git a/vendor/github.com/docker/cli/cli/command/stack/swarm/ps.go b/vendor/github.com/docker/cli/cli/command/stack/swarm/ps.go index 721f85d9c..5b28a39e7 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/swarm/ps.go +++ b/vendor/github.com/docker/cli/cli/command/stack/swarm/ps.go @@ -1,6 +1,7 @@ package swarm import ( + "context" "fmt" "github.com/docker/cli/cli/command" @@ -8,24 +9,21 @@ import ( "github.com/docker/cli/cli/command/stack/options" "github.com/docker/cli/cli/command/task" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // RunPS is the swarm implementation of docker stack ps func RunPS(dockerCli command.Cli, opts options.PS) error { - namespace := opts.Namespace - client := dockerCli.Client() - ctx := context.Background() - filter := getStackFilterFromOpt(opts.Namespace, opts.Filter) + ctx := context.Background() + client := dockerCli.Client() tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) if err != nil { return err } if len(tasks) == 0 { - return fmt.Errorf("nothing found in stack: %s", namespace) + return fmt.Errorf("nothing found in stack: %s", opts.Namespace) } format := opts.Format diff --git a/vendor/github.com/docker/cli/cli/command/stack/swarm/remove.go b/vendor/github.com/docker/cli/cli/command/stack/swarm/remove.go index 4b5941c25..4dedef12f 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/swarm/remove.go +++ b/vendor/github.com/docker/cli/cli/command/stack/swarm/remove.go @@ -1,6 +1,7 @@ package swarm import ( + "context" "fmt" "sort" "strings" @@ -11,18 +12,16 @@ import ( "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/pkg/errors" - "golang.org/x/net/context" ) // RunRemove is the swarm implementation of docker stack remove func RunRemove(dockerCli command.Cli, opts options.Remove) error { - namespaces := opts.Namespaces client := dockerCli.Client() ctx := context.Background() var errs []string - for _, namespace := range namespaces { - services, err := getServices(ctx, client, namespace) + for _, namespace := range opts.Namespaces { + services, err := getStackServices(ctx, client, namespace) if err != nil { return err } diff --git a/vendor/github.com/docker/cli/cli/command/stack/swarm/services.go b/vendor/github.com/docker/cli/cli/command/stack/swarm/services.go index aceb82dff..07b990adc 100644 --- a/vendor/github.com/docker/cli/cli/command/stack/swarm/services.go +++ b/vendor/github.com/docker/cli/cli/command/stack/swarm/services.go @@ -1,6 +1,7 @@ package swarm import ( + "context" "fmt" "github.com/docker/cli/cli/command" @@ -9,7 +10,6 @@ import ( "github.com/docker/cli/cli/command/stack/options" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // RunServices is the swarm implementation of docker stack services diff --git a/vendor/github.com/docker/cli/cli/command/task/print.go b/vendor/github.com/docker/cli/cli/command/task/print.go index 6526c28be..0f430e509 100644 --- 
a/vendor/github.com/docker/cli/cli/command/task/print.go +++ b/vendor/github.com/docker/cli/cli/command/task/print.go @@ -1,6 +1,7 @@ package task import ( + "context" "fmt" "sort" @@ -9,7 +10,6 @@ import ( "github.com/docker/cli/cli/command/idresolver" "github.com/docker/cli/cli/config/configfile" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) type tasksBySlot []swarm.Task diff --git a/vendor/github.com/docker/cli/cli/compose/convert/service.go b/vendor/github.com/docker/cli/cli/compose/convert/service.go index 9a20db153..08865968e 100644 --- a/vendor/github.com/docker/cli/cli/compose/convert/service.go +++ b/vendor/github.com/docker/cli/cli/compose/convert/service.go @@ -150,6 +150,7 @@ func Service( ReadOnly: service.ReadOnly, Privileges: &privileges, Isolation: container.Isolation(service.Isolation), + Init: service.Init, }, LogDriver: logDriver, Resources: resources, @@ -159,9 +160,10 @@ func Service( Preferences: getPlacementPreference(service.Deploy.Placement.Preferences), }, }, - EndpointSpec: endpoint, - Mode: mode, - UpdateConfig: convertUpdateConfig(service.Deploy.UpdateConfig), + EndpointSpec: endpoint, + Mode: mode, + UpdateConfig: convertUpdateConfig(service.Deploy.UpdateConfig), + RollbackConfig: convertUpdateConfig(service.Deploy.RollbackConfig), } // add an image label to serviceSpec diff --git a/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go b/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go index 70594a325..d4f5c4a43 100644 --- a/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go +++ b/vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go @@ -14,8 +14,8 @@ type Options struct { LookupValue LookupValue // TypeCastMapping maps key paths to functions to cast to a type TypeCastMapping map[Path]Cast - // ErrOnMissingVariable if true causes an error if any missing variable is found - ErrOnMissingVariable bool + // Substitution function to use + Substitute func(string, template.Mapping) (string, error) } // LookupValue is a function which maps from variable names to values. 
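(Editor's aside, not part of the patch: with ErrOnMissingVariable removed from the interpolation Options above, strictness becomes an opt-in through the new Options.Substitute hook. Below is a rough sketch of hypothetical caller code, assuming the usual func(string) (string, bool) shape of LookupValue and template.Mapping, that restores an error for unresolved variables by wrapping the mapping; Interpolate itself falls back to template.Substitute when no function is supplied, as the next hunk shows.

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/docker/cli/cli/compose/interpolation"
	"github.com/docker/cli/cli/compose/template"
)

// strictInterpolate is a hypothetical helper, not part of this patch, showing how
// Options.Substitute can reintroduce "error on missing variable" behaviour.
func strictInterpolate(config map[string]interface{}) (map[string]interface{}, error) {
	opts := interpolation.Options{
		LookupValue: os.LookupEnv,
		Substitute: func(tmpl string, mapping template.Mapping) (string, error) {
			var missing []string
			// Wrap the mapping so unresolved variables are recorded instead of
			// silently becoming empty strings.
			result, err := template.Substitute(tmpl, func(name string) (string, bool) {
				value, ok := mapping(name)
				if !ok {
					missing = append(missing, name)
				}
				return value, ok
			})
			if err == nil && len(missing) > 0 {
				err = fmt.Errorf("undefined variables: %s", strings.Join(missing, ", "))
			}
			return result, err
		},
	}
	return interpolation.Interpolate(config, opts)
}

func main() {
	cfg := map[string]interface{}{
		"services": map[string]interface{}{
			"web": map[string]interface{}{"image": "nginx:${TAG}"},
		},
	}
	if _, err := strictInterpolate(cfg); err != nil {
		fmt.Println(err)
	}
}

End of aside.)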
@@ -35,6 +35,9 @@ func Interpolate(config map[string]interface{}, opts Options) (map[string]interf if opts.TypeCastMapping == nil { opts.TypeCastMapping = make(map[Path]Cast) } + if opts.Substitute == nil { + opts.Substitute = template.Substitute + } out := map[string]interface{}{} @@ -53,7 +56,7 @@ func recursiveInterpolate(value interface{}, path Path, opts Options) (interface switch value := value.(type) { case string: - newValue, err := template.Substitute(value, template.Mapping(opts.LookupValue), opts.ErrOnMissingVariable) + newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue)) if err != nil || newValue == value { return value, newPathError(path, err) } @@ -71,11 +74,7 @@ func recursiveInterpolate(value interface{}, path Path, opts Options) (interface if err != nil { return nil, err } - interpolatedKey, err := template.Substitute(key, template.Mapping(opts.LookupValue), opts.ErrOnMissingVariable) - if err != nil { - return nil, err - } - out[interpolatedKey] = interpolatedElem + out[key] = interpolatedElem } return out, nil diff --git a/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go b/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go index 976ded766..888d29b58 100644 --- a/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go +++ b/vendor/github.com/docker/cli/cli/compose/loader/interpolate.go @@ -64,12 +64,6 @@ func toBoolean(value string) (interface{}, error) { } } -func interpolateConfig(configDict map[string]interface{}, lookupEnv interp.LookupValue, errOnMissingVariable bool) (map[string]interface{}, error) { - return interp.Interpolate( - configDict, - interp.Options{ - LookupValue: lookupEnv, - TypeCastMapping: interpolateTypeCastMapping, - ErrOnMissingVariable: errOnMissingVariable, - }) +func interpolateConfig(configDict map[string]interface{}, opts interp.Options) (map[string]interface{}, error) { + return interp.Interpolate(configDict, opts) } diff --git a/vendor/github.com/docker/cli/cli/compose/loader/loader.go b/vendor/github.com/docker/cli/cli/compose/loader/loader.go index 207148c48..f11619b98 100644 --- a/vendor/github.com/docker/cli/cli/compose/loader/loader.go +++ b/vendor/github.com/docker/cli/cli/compose/loader/loader.go @@ -8,6 +8,7 @@ import ( "sort" "strings" + interp "github.com/docker/cli/cli/compose/interpolation" "github.com/docker/cli/cli/compose/schema" "github.com/docker/cli/cli/compose/template" "github.com/docker/cli/cli/compose/types" @@ -22,6 +23,16 @@ import ( yaml "gopkg.in/yaml.v2" ) +// Options supported by Load +type Options struct { + // Skip schema validation + SkipValidation bool + // Skip interpolation + SkipInterpolation bool + // Interpolation options + Interpolate *interp.Options +} + // ParseYAML reads the bytes from a file, parses the bytes into a mapping // structure, and returns it. func ParseYAML(source []byte) (map[string]interface{}, error) { @@ -40,47 +51,26 @@ func ParseYAML(source []byte) (map[string]interface{}, error) { return converted.(map[string]interface{}), nil } -func processEnabled(configDict map[string]interface{}) bool { - if v, ok := configDict["enabled"]; ok { - e := fmt.Sprintf("%v", v) - e = strings.Trim(e, " ") - reverse := len(e) != 0 && e[0] == '!' 
- if reverse { - e = strings.Trim(e[1:], " ") - } - disabled := e == "" || e == "0" || e == "false" || e == "FALSE"; - if reverse { - disabled = !disabled - } - if disabled { - return false - } - delete(configDict, "enabled") - } - for k, v := range configDict { - if m, ok := v.(map[string]interface{}); ok { - if ! processEnabled(m) { - delete(configDict, k) - } - } - } - return true -} - -func resolveEnabled(configDict map[string]interface{}) (map[string]interface{}, error) { - if ! processEnabled(configDict) { - return map[string]interface{}{}, nil - } - return configDict, nil -} - // Load reads a ConfigDetails and returns a fully loaded configuration -func Load(configDetails types.ConfigDetails) (*types.Config, error) { +func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.Config, error) { if len(configDetails.ConfigFiles) < 1 { return nil, errors.Errorf("No files specified") } + opts := &Options{ + Interpolate: &interp.Options{ + Substitute: template.Substitute, + LookupValue: configDetails.LookupEnv, + TypeCastMapping: interpolateTypeCastMapping, + }, + } + + for _, op := range options { + op(opts) + } + configs := []*types.Config{} + var err error for _, file := range configDetails.ConfigFiles { configDict := file.Config @@ -96,17 +86,17 @@ func Load(configDetails types.ConfigDetails) (*types.Config, error) { return nil, err } - var err error - configDict, err = interpolateConfig(configDict, configDetails.LookupEnv, configDetails.ErrOnMissingVariable) - if err != nil { - return nil, err - } - configDict, err = resolveEnabled(configDict) - if err != nil { - return nil, err + if !opts.SkipInterpolation { + configDict, err = interpolateConfig(configDict, *opts.Interpolate) + if err != nil { + return nil, err + } } - if err := schema.Validate(configDict, configDetails.Version); err != nil { - return nil, err + + if !opts.SkipValidation { + if err := schema.Validate(configDict, configDetails.Version); err != nil { + return nil, err + } } cfg, err := loadSections(configDict, configDetails) @@ -184,6 +174,7 @@ func loadSections(config map[string]interface{}, configDetails types.ConfigDetai return nil, err } } + cfg.Extras = getExtras(config) return &cfg, nil } @@ -401,9 +392,32 @@ func LoadService(name string, serviceDict map[string]interface{}, workingDir str if err := resolveVolumePaths(serviceConfig.Volumes, workingDir, lookupEnv); err != nil { return nil, err } + + serviceConfig.Extras = getExtras(serviceDict) + return serviceConfig, nil } +func loadExtras(name string, source map[string]interface{}) map[string]interface{} { + if dict, ok := source[name].(map[string]interface{}); ok { + return getExtras(dict) + } + return nil +} + +func getExtras(dict map[string]interface{}) map[string]interface{} { + extras := map[string]interface{}{} + for key, value := range dict { + if strings.HasPrefix(key, "x-") { + extras[key] = value + } + } + if len(extras) == 0 { + return nil + } + return extras +} + func updateEnvironment(environment map[string]*string, vars map[string]*string, lookupEnv template.Mapping) { for k, v := range vars { interpolatedV, ok := lookupEnv(k) @@ -516,6 +530,7 @@ func LoadNetworks(source map[string]interface{}, version string) (map[string]typ case network.Name == "": network.Name = name } + network.Extras = loadExtras(name, source) networks[name] = network } return networks, nil @@ -558,6 +573,7 @@ func LoadVolumes(source map[string]interface{}, version string) (map[string]type case volume.Name == "": volume.Name = name } + volume.Extras = 
loadExtras(name, source) volumes[name] = volume } return volumes, nil @@ -575,7 +591,9 @@ func LoadSecrets(source map[string]interface{}, details types.ConfigDetails) (ma if err != nil { return nil, err } - secrets[name] = types.SecretConfig(obj) + secretConfig := types.SecretConfig(obj) + secretConfig.Extras = loadExtras(name, source) + secrets[name] = secretConfig } return secrets, nil } @@ -592,7 +610,9 @@ func LoadConfigObjs(source map[string]interface{}, details types.ConfigDetails) if err != nil { return nil, err } - configs[name] = types.ConfigObjConfig(obj) + configConfig := types.ConfigObjConfig(obj) + configConfig.Extras = loadExtras(name, source) + configs[name] = configConfig } return configs, nil } diff --git a/vendor/github.com/docker/cli/cli/compose/schema/bindata.go b/vendor/github.com/docker/cli/cli/compose/schema/bindata.go index 192859b74..028667054 100644 --- a/vendor/github.com/docker/cli/cli/compose/schema/bindata.go +++ b/vendor/github.com/docker/cli/cli/compose/schema/bindata.go @@ -1,4 +1,4 @@ -// Code generated by "esc -o bindata.go -pkg schema -private -modtime=1518458244 data"; DO NOT EDIT. +// Code generated by "esc -o bindata.go -pkg schema -ignore .*\.go -private -modtime=1518458244 data"; DO NOT EDIT. package schema @@ -465,6 +465,49 @@ DoTuq9lAU9Q4O1xV/59X/w8AAP//zRo7vm9CAAA= `, }, + "/data/config_schema_v3.7.json": { + local: "data/config_schema_v3.7.json", + size: 17777, + modtime: 1518458244, + compressed: ` +H4sIAAAAAAAC/+xcS4/bOBK++1cYmrlNPwLsYBeb2x73tHvehiPQVNnmNEVyipTTTuD/vtDTEkWKlK1O +dzAdIEi3VHzUg8WvHsr31Xqd/KrpAXKSfF4nB2PU58fHP7QU9/XTB4n7xwzJztx/+v2xfvZLcleOY1k5 +hEqxY/u0fpMe//bwj4dyeE1iTgpKIrn9A6ipnyH8WTCEcvBTcgTUTIpkc7cq3ymUCtAw0Mnndbm59boj +aR/0ptUGmdgn1eNzNcN6nWjAI6O9Gbqt/vJ4mf+xI7uzZ+1ttnquiDGA4r/jvVWvvzyR+2//uv/fp/t/ +PqT3m99+Hbwu5Yuwq5fPYMcEM0yKbv2kozw3P527hUmWVcSED9beEa5hyLMA81Xic4jnjuyNeG7Wd/A8 +ZOcoeZEHNdhSvREz9fLL6E8DRTBhk62p3sxiy+WXYbj2GiGGW6o3Yrhe/jaGVy3T7j0mX17uy3/P1ZyT +89Wz9PZXMTHweS5xunyOX56dQD2SzEBxeap27pZZTZCDMEknpvU62RaMZ7bUpYD/lFM89R6u199t996b +p3o/+M1vFN17Dy/deyqFgRdTMTW9dC0CSZ8Bd4xD7AiCtaV7RMaZNqnENGPUOMdzsgV+0wyU0AOkO5R5 +cJZdWnOinRO1HjySc0NwD9GS1Yc81ezbQK5PCRMG9oDJXTd2c7bGjiYLH0z7TJd/NivHhAklKiVZNmCC +IJJTuSNmINdu/tZJIdifBfy7ITFYgD1vhlItP/EeZaFSRbA8hdOyT6jMcyKWOppz+IiQ/OiSGJz3Zo3+ +q261wbY83KwjrNLhLgLuJuxwSkuXBdJY/zH3HK3XScGyeOL9HOJcZsN9iyLfAibnEfHokA5+36xcbyzt +G8IEYCpIDkE7RshAGEZ4qhXQAXmrqQnNJFH+PEHYM23w5KS8cNHfWAYKRKbTOoKZ73qTDLpwZlE3kYmp +K6WeprxUyr0l1sBUA0F6uHK8zAkTMUoFYfCkJKvd2LvzTyCOaWc3s8UA4shQirx10nFXe2/8i5IabneO +3UXbMH7XnenNUHrJTmJOys22a688V7DD8voC7PNQQmLCU87E8/ImDi8GSXqQ2lyDnpIDEG4O9AD0eWJ4 +n2owWmoTY+QsJ/swkWBD97+VkgMRQyJFg/NoyYlp0ilThFdjzmRRVfamlft9Seqz31EME4n+M2RHwFiI +KtUl9HLd0yFsEIxVB6RfHupQdeKMVj9xPsbErivYfmJxGIeaB1rJCS3BMYLWIYtqQod0hCAutCNiHev3 +r4po5keSUaoLphuCuNSHPeOtLA6HtmrnjGjQt4WGPS90/D3SJlxj/z451jPUO2d8IBiYqg94OXduZBOG +wK8Zp6ohjB/6ispD9A+Ykmh+SGR18VMX+FAvPg62bHVHDXqdCG3CS8XFZ23awj1AFVvO9AGyOWNQGkkl +jzsYzkRU/GGYiNauQnoK2ZFx2Fscu2AMAslSKfgpglIbgsEchwZaIDOnVCqzOMZ0J60uVt/lrIYbstL9 +H4mNv05iQ580Nddha20yJlKpQATPhjZSpXskFFIFyKRTFAMHmxVYhwajaTTbC8JDx8zkandlSsGY8GEv +OMuZ/9A4rDYCr9VYzQ3RJuBZlMueiBCmA4SIyOBAcMbVUR3Mned+WkVioGHhvprvrtnIxkk/C3rZ29h4 +0Y/7UBU6GMRVNEKnEVe7owL9c3jogY4q8s1VfrxZKdJ3vrbXj0YEw6qeZtqAoKf4hbZsVAqZG3fFRV0V +Fdn7UzHu2CT6rDbNCT+EFSGpVB7V3MhGd6W8PhcthvMHp7bnnIhjcyZYXuTJ5/UnX8QaL5lXhvZWDmgC +0Pt871eJz+XNnjGcsuXzdLvGsBViZj+JlaqdaoLokwYbS6YbMkLNEkyTrVVWcuZthQE8ugFWGKEhGGRW +fajFrn2IBfp9VlEMy0EW5lp4StDMB7h221mvt6Wtx0yZUI/StqCnXrWxTrsEzSQGj4DIqjpYFHhBUJxR 
+okMA8YYkP0rOt4Q+p02P1BxQPoHGFUHCOXCm8xh0m2TAyekqy6kLWoTxAiElNKIk0uhKMCPx+iVz8pK2 +y1YkgXNbn1PMwLcmiOqesfFlfTLudwy1qdMQUjW/Dd3/2Zvaia0GXK4OlREDHybxYRL9DF0VG+ilzMGZ +BFimDVAVsfWKJIdchrpAbk/5WypH0CVM8BUg34sAHNR7EICMpgNr8Fw5Y9pXqqLcbtk19pCc1SHmEuZN +paj3EeN5bnR1pd8pgXiujI5yrV+ZyOTX+TBrAWkrTihY0OxWQWuDhAkzu1fBFotC2AGCoDB5LMc5o4m8 +0XIJeYVAsjcoGd2q/Bs+LnC6myk8Px4wCgyH2nNoza+tia7DjGmKYKBbuWteXMVbwrQVJM9NTivoqJMj +4UVEDeSqrhFf7iBi8Nn5rVNIpy3ZAgFaTBdXVBtRQ5VKtXwdI9wqtAln0Zki+VIeNrqxKnEGDO/BdxZb +4UlTv2/feTfusPRo9alLSN11stpEq9h7MJbbf5Ubs4uPriQaMYbQQ1S+bWba4wekL0fpeqdLa6g+PNoM +j/az2//7s9XmM9Dgp4YVVfjLzRssNOKbjXeg/59EraNL2KnWhupDrT+LWq2mm556x8WfKYlHdwav+rWe +bhs2meM/c/BFWN5N+UqV1qKNsKc5X/DeevhtAslOdfC/EgRcoN3RrVMrhbLqmhvtb9H9vqQdP/oyveRT +nEbFye/DBpf6q/LNQD4WSf11TQ8obKICc9f36nZ7TfvduKfjbxi9rsq/59X/AwAA//+9o87pcUUAAA== +`, + }, + "/": { isDir: true, local: "", diff --git a/vendor/github.com/docker/cli/cli/compose/schema/schema.go b/vendor/github.com/docker/cli/cli/compose/schema/schema.go index bbf20fb8a..45b719d79 100644 --- a/vendor/github.com/docker/cli/cli/compose/schema/schema.go +++ b/vendor/github.com/docker/cli/cli/compose/schema/schema.go @@ -1,6 +1,6 @@ package schema -//go:generate esc -o bindata.go -pkg schema -private -modtime=1518458244 data +//go:generate esc -o bindata.go -pkg schema -ignore .*\.go -private -modtime=1518458244 data import ( "fmt" diff --git a/vendor/github.com/docker/cli/cli/compose/template/template.go b/vendor/github.com/docker/cli/cli/compose/template/template.go index 071e7105b..be5d45605 100644 --- a/vendor/github.com/docker/cli/cli/compose/template/template.go +++ b/vendor/github.com/docker/cli/cli/compose/template/template.go @@ -7,7 +7,7 @@ import ( ) var delimiter = "\\$" -var substitution = "[_a-z][_a-z0-9.]*(?::?[-?][^}]*)?" +var substitution = "[_a-z][_a-z0-9]*(?::?[-?][^}]*)?" var patternString = fmt.Sprintf( "%s(?i:(?P%s)|(?P%s)|{(?P%s)}|(?P))", @@ -16,6 +16,14 @@ var patternString = fmt.Sprintf( var pattern = regexp.MustCompile(patternString) +// DefaultSubstituteFuncs contains the default SubstitueFunc used by the docker cli +var DefaultSubstituteFuncs = []SubstituteFunc{ + softDefault, + hardDefault, + requiredNonEmpty, + required, +} + // InvalidTemplateError is returned when a variable template is not in a valid // format type InvalidTemplateError struct { @@ -32,8 +40,14 @@ func (e InvalidTemplateError) Error() string { // and the absence of a value. type Mapping func(string) (string, bool) -// Substitute variables in the string with their values -func Substitute(template string, mapping Mapping, errOnMissingVariable bool) (string, error) { +// SubstituteFunc is a user-supplied function that apply substitution. +// Returns the value as a string, a bool indicating if the function could apply +// the substitution and an error. +type SubstituteFunc func(string, Mapping) (string, bool, error) + +// SubstituteWith subsitute variables in the string with their values. +// It accepts additional substitute function. 
+func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) { var err error result := pattern.ReplaceAllStringFunc(template, func(substring string) string { matches := pattern.FindStringSubmatch(substring) @@ -47,66 +61,92 @@ func Substitute(template string, mapping Mapping, errOnMissingVariable bool) (st substitution = groups["braced"] } - switch { - - case substitution == "": + if substitution == "" { err = &InvalidTemplateError{Template: template} return "" + } - // Soft default (fall back if unset or empty) - case strings.Contains(substitution, ":-"): - name, defaultValue := partition(substitution, ":-") - value, ok := mapping(name) - if !ok || value == "" { - return defaultValue - } - return value - - // Hard default (fall back if-and-only-if empty) - case strings.Contains(substitution, "-"): - name, defaultValue := partition(substitution, "-") - value, ok := mapping(name) - if !ok { - return defaultValue - } - return value - - case strings.Contains(substitution, ":?"): - name, errorMessage := partition(substitution, ":?") - value, ok := mapping(name) - if !ok || value == "" { - err = &InvalidTemplateError{ - Template: fmt.Sprintf("required variable %s is missing a value: %s", name, errorMessage), - } + for _, f := range subsFuncs { + var ( + value string + applied bool + ) + value, applied, err = f(substitution, mapping) + if err != nil { return "" } - return value - - case strings.Contains(substitution, "?"): - name, errorMessage := partition(substitution, "?") - value, ok := mapping(name) - if !ok { - err = &InvalidTemplateError{ - Template: fmt.Sprintf("required variable %s is missing a value: %s", name, errorMessage), - } - return "" + if !applied { + continue } return value } - value, found := mapping(substitution) - if !found && errOnMissingVariable { - err = &InvalidTemplateError{ - Template: fmt.Sprintf("required variable %s is missing a value", substitution), - } - return "" - } + value, _ := mapping(substitution) return value }) return result, err } +// Substitute variables in the string with their values +func Substitute(template string, mapping Mapping) (string, error) { + return SubstituteWith(template, mapping, pattern, DefaultSubstituteFuncs...) 
+} + +// Soft default (fall back if unset or empty) +func softDefault(substitution string, mapping Mapping) (string, bool, error) { + if !strings.Contains(substitution, ":-") { + return "", false, nil + } + name, defaultValue := partition(substitution, ":-") + value, ok := mapping(name) + if !ok || value == "" { + return defaultValue, true, nil + } + return value, true, nil +} + +// Hard default (fall back if-and-only-if empty) +func hardDefault(substitution string, mapping Mapping) (string, bool, error) { + if !strings.Contains(substitution, "-") { + return "", false, nil + } + name, defaultValue := partition(substitution, "-") + value, ok := mapping(name) + if !ok { + return defaultValue, true, nil + } + return value, true, nil +} + +func requiredNonEmpty(substitution string, mapping Mapping) (string, bool, error) { + if !strings.Contains(substitution, ":?") { + return "", false, nil + } + name, errorMessage := partition(substitution, ":?") + value, ok := mapping(name) + if !ok || value == "" { + return "", true, &InvalidTemplateError{ + Template: fmt.Sprintf("required variable %s is missing a value: %s", name, errorMessage), + } + } + return value, true, nil +} + +func required(substitution string, mapping Mapping) (string, bool, error) { + if !strings.Contains(substitution, "?") { + return "", false, nil + } + name, errorMessage := partition(substitution, "?") + value, ok := mapping(name) + if !ok { + return "", true, &InvalidTemplateError{ + Template: fmt.Sprintf("required variable %s is missing a value: %s", name, errorMessage), + } + } + return value, true, nil +} + func matchGroups(matches []string) map[string]string { groups := make(map[string]string) for i, name := range pattern.SubexpNames()[1:] { diff --git a/vendor/github.com/docker/cli/cli/compose/types/types.go b/vendor/github.com/docker/cli/cli/compose/types/types.go index b539028db..cad26d58b 100644 --- a/vendor/github.com/docker/cli/cli/compose/types/types.go +++ b/vendor/github.com/docker/cli/cli/compose/types/types.go @@ -56,11 +56,10 @@ type ConfigFile struct { // ConfigDetails are the details about a group of ConfigFiles type ConfigDetails struct { - Version string - WorkingDir string - ConfigFiles []ConfigFile - Environment map[string]string - ErrOnMissingVariable bool + Version string + WorkingDir string + ConfigFiles []ConfigFile + Environment map[string]string } // LookupEnv provides a lookup function for environment variables @@ -78,6 +77,7 @@ type Config struct { Volumes map[string]VolumeConfig `yaml:",omitempty"` Secrets map[string]SecretConfig `yaml:",omitempty"` Configs map[string]ConfigObjConfig `yaml:",omitempty"` + Extras map[string]interface{} `yaml:",inline"` } // Services is a list of ServiceConfig @@ -119,7 +119,9 @@ type ServiceConfig struct { Hostname string `yaml:",omitempty"` HealthCheck *HealthCheckConfig `yaml:",omitempty"` Image string `yaml:",omitempty"` + Init *bool `yaml:",omitempty"` Ipc string `yaml:",omitempty"` + Isolation string `mapstructure:"isolation" yaml:"isolation,omitempty"` Labels Labels `yaml:",omitempty"` Links []string `yaml:",omitempty"` Logging *LoggingConfig `yaml:",omitempty"` @@ -142,7 +144,8 @@ type ServiceConfig struct { User string `yaml:",omitempty"` Volumes []ServiceVolumeConfig `yaml:",omitempty"` WorkingDir string `mapstructure:"working_dir" yaml:"working_dir,omitempty"` - Isolation string `mapstructure:"isolation" yaml:"isolation,omitempty"` + + Extras map[string]interface{} `yaml:",inline"` } // BuildConfig is a type for build @@ -191,14 +194,15 @@ type 
LoggingConfig struct { // DeployConfig the deployment configuration for a service type DeployConfig struct { - Mode string `yaml:",omitempty"` - Replicas *uint64 `yaml:",omitempty"` - Labels Labels `yaml:",omitempty"` - UpdateConfig *UpdateConfig `mapstructure:"update_config" yaml:"update_config,omitempty"` - Resources Resources `yaml:",omitempty"` - RestartPolicy *RestartPolicy `mapstructure:"restart_policy" yaml:"restart_policy,omitempty"` - Placement Placement `yaml:",omitempty"` - EndpointMode string `mapstructure:"endpoint_mode" yaml:"endpoint_mode,omitempty"` + Mode string `yaml:",omitempty"` + Replicas *uint64 `yaml:",omitempty"` + Labels Labels `yaml:",omitempty"` + UpdateConfig *UpdateConfig `mapstructure:"update_config" yaml:"update_config,omitempty"` + RollbackConfig *UpdateConfig `mapstructure:"rollback_config" yaml:"rollback_config,omitempty"` + Resources Resources `yaml:",omitempty"` + RestartPolicy *RestartPolicy `mapstructure:"restart_policy" yaml:"restart_policy,omitempty"` + Placement Placement `yaml:",omitempty"` + EndpointMode string `mapstructure:"endpoint_mode" yaml:"endpoint_mode,omitempty"` } // HealthCheckConfig the healthcheck configuration for a service @@ -354,14 +358,15 @@ func (u *UlimitsConfig) MarshalYAML() (interface{}, error) { // NetworkConfig for a network type NetworkConfig struct { - Name string `yaml:",omitempty"` - Driver string `yaml:",omitempty"` - DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty"` - Ipam IPAMConfig `yaml:",omitempty"` - External External `yaml:",omitempty"` - Internal bool `yaml:",omitempty"` - Attachable bool `yaml:",omitempty"` - Labels Labels `yaml:",omitempty"` + Name string `yaml:",omitempty"` + Driver string `yaml:",omitempty"` + DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty"` + Ipam IPAMConfig `yaml:",omitempty"` + External External `yaml:",omitempty"` + Internal bool `yaml:",omitempty"` + Attachable bool `yaml:",omitempty"` + Labels Labels `yaml:",omitempty"` + Extras map[string]interface{} `yaml:",inline"` } // IPAMConfig for a network @@ -377,11 +382,12 @@ type IPAMPool struct { // VolumeConfig for a volume type VolumeConfig struct { - Name string `yaml:",omitempty"` - Driver string `yaml:",omitempty"` - DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty"` - External External `yaml:",omitempty"` - Labels Labels `yaml:",omitempty"` + Name string `yaml:",omitempty"` + Driver string `yaml:",omitempty"` + DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty"` + External External `yaml:",omitempty"` + Labels Labels `yaml:",omitempty"` + Extras map[string]interface{} `yaml:",inline"` } // External identifies a Volume or Network as a reference to a resource that is @@ -408,10 +414,11 @@ type CredentialSpecConfig struct { // FileObjectConfig is a config type for a file used by a service type FileObjectConfig struct { - Name string `yaml:",omitempty"` - File string `yaml:",omitempty"` - External External `yaml:",omitempty"` - Labels Labels `yaml:",omitempty"` + Name string `yaml:",omitempty"` + File string `yaml:",omitempty"` + External External `yaml:",omitempty"` + Labels Labels `yaml:",omitempty"` + Extras map[string]interface{} `yaml:",inline"` } // SecretConfig for a secret diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index 565330662..37e1533f4 100644 --- 
a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -3,6 +3,7 @@ package configfile import ( "encoding/base64" "encoding/json" + "fmt" "io" "io/ioutil" "os" @@ -45,7 +46,8 @@ type ConfigFile struct { PruneFilters []string `json:"pruneFilters,omitempty"` Proxies map[string]ProxyConfig `json:"proxies,omitempty"` Experimental string `json:"experimental,omitempty"` - Orchestrator string `json:"orchestrator,omitempty"` + StackOrchestrator string `json:"stackOrchestrator,omitempty"` + Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` } // ProxyConfig contains proxy configuration settings @@ -56,6 +58,11 @@ type ProxyConfig struct { FTPProxy string `json:"ftpProxy,omitempty"` } +// KubernetesConfig contains Kubernetes orchestrator settings +type KubernetesConfig struct { + AllNamespaces string `json:"allNamespaces,omitempty"` +} + // New initializes an empty configuration file for the given filename 'fn' func New(fn string) *ConfigFile { return &ConfigFile{ @@ -119,7 +126,7 @@ func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { ac.ServerAddress = addr configFile.AuthConfigs[addr] = ac } - return nil + return checkKubernetesConfiguration(configFile.Kubernetes) } // ContainsAuth returns whether there is authentication configured @@ -312,3 +319,17 @@ func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, func (configFile *ConfigFile) GetFilename() string { return configFile.Filename } + +func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error { + if kubeConfig == nil { + return nil + } + switch kubeConfig.AllNamespaces { + case "": + case "enabled": + case "disabled": + default: + return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go index 9b619c027..a9012c6d4 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go @@ -1,11 +1,11 @@ package credentials import ( - "github.com/docker/docker-credential-helpers/pass" + "os/exec" ) func defaultCredentialsStore() string { - if pass.PassInitialized { + if _, err := exec.LookPath("pass"); err == nil { return "pass" } diff --git a/vendor/github.com/docker/cli/cli/flags/client.go b/vendor/github.com/docker/cli/cli/flags/client.go index 9b6940f6b..c57879e6a 100644 --- a/vendor/github.com/docker/cli/cli/flags/client.go +++ b/vendor/github.com/docker/cli/cli/flags/client.go @@ -4,7 +4,6 @@ package flags type ClientOptions struct { Common *CommonOptions ConfigDir string - Version bool } // NewClientOptions returns a new ClientOptions diff --git a/vendor/github.com/docker/cli/cli/flags/common.go b/vendor/github.com/docker/cli/cli/flags/common.go index 6a9e79078..3834097c3 100644 --- a/vendor/github.com/docker/cli/cli/flags/common.go +++ b/vendor/github.com/docker/cli/cli/flags/common.go @@ -31,13 +31,12 @@ var ( // CommonOptions are options common to both the client and the daemon. 
type CommonOptions struct { - Debug bool - Hosts []string - Orchestrator string - LogLevel string - TLS bool - TLSVerify bool - TLSOptions *tlsconfig.Options + Debug bool + Hosts []string + LogLevel string + TLS bool + TLSVerify bool + TLSOptions *tlsconfig.Options } // NewCommonOptions returns a new CommonOptions @@ -55,8 +54,6 @@ func (commonOpts *CommonOptions) InstallFlags(flags *pflag.FlagSet) { flags.StringVarP(&commonOpts.LogLevel, "log-level", "l", "info", `Set the logging level ("debug"|"info"|"warn"|"error"|"fatal")`) flags.BoolVar(&commonOpts.TLS, "tls", dockerTLS, "Use TLS; implied by --tlsverify") flags.BoolVar(&commonOpts.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote") - flags.StringVar(&commonOpts.Orchestrator, "orchestrator", "", "Which orchestrator to use with the docker cli (swarm|kubernetes) (default swarm) (experimental)") - flags.SetAnnotation("orchestrator", "experimentalCLI", nil) // TODO use flag flags.String("identity"}, "i", "", "Path to libtrust key file") diff --git a/vendor/github.com/docker/cli/cli/registry/client/client.go b/vendor/github.com/docker/cli/cli/registry/client/client.go index 19d45a55f..35a110254 100644 --- a/vendor/github.com/docker/cli/cli/registry/client/client.go +++ b/vendor/github.com/docker/cli/cli/registry/client/client.go @@ -1,6 +1,7 @@ package client import ( + "context" "fmt" "net/http" "strings" @@ -14,7 +15,6 @@ import ( "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/net/context" ) // RegistryClient is a client used to communicate with a Docker distribution @@ -133,7 +133,7 @@ func (c *client) getRepositoryForReference(ctx context.Context, ref reference.Na if err != nil { return nil, errors.Wrapf(err, "failed to parse repo name from %s", ref) } - return distributionclient.NewRepository(ctx, repoName, repoEndpoint.BaseURL(), httpTransport) + return distributionclient.NewRepository(repoName, repoEndpoint.BaseURL(), httpTransport) } func (c *client) getHTTPTransportForRepoEndpoint(ctx context.Context, repoEndpoint repositoryEndpoint) (http.RoundTripper, error) { diff --git a/vendor/github.com/docker/cli/cli/registry/client/endpoint.go b/vendor/github.com/docker/cli/cli/registry/client/endpoint.go index a2d9c3359..5af00ca70 100644 --- a/vendor/github.com/docker/cli/cli/registry/client/endpoint.go +++ b/vendor/github.com/docker/cli/cli/registry/client/endpoint.go @@ -102,7 +102,7 @@ func getHTTPTransport(authConfig authtypes.AuthConfig, endpoint registry.APIEndp modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) } else { creds := registry.NewStaticCredentialStore(&authConfig) - tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, "*") + tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, "push", "pull") basicHandler := auth.NewBasicHandler(creds) modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) } diff --git a/vendor/github.com/docker/cli/cli/registry/client/fetcher.go b/vendor/github.com/docker/cli/cli/registry/client/fetcher.go index 796bc01eb..aea50a31f 100644 --- a/vendor/github.com/docker/cli/cli/registry/client/fetcher.go +++ b/vendor/github.com/docker/cli/cli/registry/client/fetcher.go @@ -1,6 +1,7 @@ package client import ( + "context" "fmt" "github.com/docker/cli/cli/manifest/types" @@ -15,7 +16,6 @@ import ( digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" - 
"golang.org/x/net/context" ) // fetchManifest pulls a manifest from a registry and returns it. An error diff --git a/vendor/github.com/docker/cli/cli/trust/trust.go b/vendor/github.com/docker/cli/cli/trust/trust.go index 6b3f321da..df11227e9 100644 --- a/vendor/github.com/docker/cli/cli/trust/trust.go +++ b/vendor/github.com/docker/cli/cli/trust/trust.go @@ -1,6 +1,7 @@ package trust import ( + "context" "encoding/json" "io" "net" @@ -31,7 +32,6 @@ import ( "github.com/theupdateframework/notary/trustpinning" "github.com/theupdateframework/notary/tuf/data" "github.com/theupdateframework/notary/tuf/signed" - "golang.org/x/net/context" ) var ( diff --git a/vendor/github.com/docker/cli/kubernetes/client/clientset/scheme/register.go b/vendor/github.com/docker/cli/kubernetes/client/clientset/scheme/register.go index 9298122bb..ffd6e4f00 100644 --- a/vendor/github.com/docker/cli/kubernetes/client/clientset/scheme/register.go +++ b/vendor/github.com/docker/cli/kubernetes/client/clientset/scheme/register.go @@ -3,10 +3,10 @@ package scheme import ( composev1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1" composev1beta2 "github.com/docker/cli/kubernetes/compose/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" ) // Variables required for registration diff --git a/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1/compose_client.go b/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1/compose_client.go index 57e836f8d..4039d4de1 100644 --- a/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1/compose_client.go +++ b/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1/compose_client.go @@ -2,9 +2,9 @@ package v1beta1 import ( "github.com/docker/cli/kubernetes/client/clientset/scheme" - v1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" + "github.com/docker/cli/kubernetes/compose/v1beta1" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" ) // ComposeV1beta1Interface defines the methods a compose v1beta1 client has diff --git a/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1/stack.go b/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1/stack.go index c3f13811f..1fbe53f81 100644 --- a/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1/stack.go +++ b/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1/stack.go @@ -1,12 +1,12 @@ package v1beta1 import ( - scheme "github.com/docker/cli/kubernetes/client/clientset/scheme" - v1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + "github.com/docker/cli/kubernetes/client/clientset/scheme" + "github.com/docker/cli/kubernetes/compose/v1beta1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" ) // StacksGetter has a method to return a StackInterface. 
diff --git a/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2/compose_client.go b/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2/compose_client.go index cbb6fa59a..34bee389c 100644 --- a/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2/compose_client.go +++ b/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2/compose_client.go @@ -2,9 +2,9 @@ package v1beta2 import ( "github.com/docker/cli/kubernetes/client/clientset/scheme" - v1beta2 "github.com/docker/cli/kubernetes/compose/v1beta2" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" + "github.com/docker/cli/kubernetes/compose/v1beta2" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" ) // ComposeV1beta2Interface defines the methods a compose v1beta2 client has diff --git a/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2/stack.go b/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2/stack.go index 84ac41e6d..6937b4bfb 100644 --- a/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2/stack.go +++ b/vendor/github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2/stack.go @@ -1,12 +1,12 @@ package v1beta2 import ( - scheme "github.com/docker/cli/kubernetes/client/clientset/scheme" - v1beta2 "github.com/docker/cli/kubernetes/compose/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + "github.com/docker/cli/kubernetes/client/clientset/scheme" + "github.com/docker/cli/kubernetes/compose/v1beta2" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" ) // StacksGetter has a method to return a StackInterface. 
diff --git a/vendor/github.com/docker/cli/kubernetes/compose/v1beta1/doc.go b/vendor/github.com/docker/cli/kubernetes/compose/v1beta1/doc.go index 18e8828b8..c5f5dcbb0 100644 --- a/vendor/github.com/docker/cli/kubernetes/compose/v1beta1/doc.go +++ b/vendor/github.com/docker/cli/kubernetes/compose/v1beta1/doc.go @@ -2,9 +2,10 @@ // backward compatibility by support multiple concurrent versions // of the same resource -// Package v1beta1 is the first version of the Stack spec, containing only a compose file // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register // +k8s:defaulter-gen=TypeMeta // +groupName=compose.docker.com + +// Package v1beta1 is the first version of the Stack spec, containing only a compose file package v1beta1 // import "github.com/docker/cli/kubernetes/compose/v1beta1" diff --git a/vendor/github.com/docker/cli/kubernetes/compose/v1beta1/parsing.go b/vendor/github.com/docker/cli/kubernetes/compose/v1beta1/parsing.go new file mode 100644 index 000000000..634a8428a --- /dev/null +++ b/vendor/github.com/docker/cli/kubernetes/compose/v1beta1/parsing.go @@ -0,0 +1,4 @@ +package v1beta1 + +// MaxComposeVersion is the most recent version of compose file Schema supported in v1beta1 +const MaxComposeVersion = "3.5" diff --git a/vendor/github.com/docker/cli/kubernetes/compose/v1beta2/stack.go b/vendor/github.com/docker/cli/kubernetes/compose/v1beta2/stack.go index a14a698dc..6c90cdf12 100644 --- a/vendor/github.com/docker/cli/kubernetes/compose/v1beta2/stack.go +++ b/vendor/github.com/docker/cli/kubernetes/compose/v1beta2/stack.go @@ -17,8 +17,8 @@ type StackList struct { // Stack is v1beta2's representation of a Stack type Stack struct { - metav1.TypeMeta `json:",inline" yaml:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` Spec *StackSpec `json:"spec,omitempty"` Status *StackStatus `json:"status,omitempty"` @@ -70,47 +70,47 @@ type StackSpec struct { type ServiceConfig struct { Name string `json:"name,omitempty"` - CapAdd []string `json:"cap_add,omitempty" yaml:"cap_add,omitempty"` - CapDrop []string `json:"cap_drop,omitempty" yaml:"cap_drop,omitempty"` - Command []string `json:"command,omitempty" yaml:"command,omitempty"` - Configs []ServiceConfigObjConfig `json:"configs,omitempty" yaml:"configs,omitempty"` - Deploy DeployConfig `json:"deploy,omitempty" yaml:"deploy,omitempty"` - Entrypoint []string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"` - Environment map[string]*string `json:"environment,omitempty" yaml:"environment,omitempty"` - ExtraHosts []string `json:"extra_hosts,omitempty" yaml:"extra_hosts,omitempty"` - Hostname string `json:"hostname,omitempty" yaml:"hostname,omitempty"` - HealthCheck *HealthCheckConfig `json:"health_check,omitempty" yaml:"health_check,omitempty"` - Image string `json:"image,omitempty" yaml:"image,omitempty"` - Ipc string `json:"ipc,omitempty" yaml:"ipc,omitempty"` - Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - Pid string `json:"pid,omitempty" yaml:"pid,omitempty"` - Ports []ServicePortConfig `json:"ports,omitempty" yaml:"ports,omitempty"` - Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"` - ReadOnly bool `json:"read_only,omitempty" yaml:"read_only,omitempty"` - Secrets []ServiceSecretConfig `json:"secrets,omitempty" yaml:"secrets,omitempty"` - StdinOpen bool `json:"stdin_open,omitempty" yaml:"stdin_open,omitempty"` - StopGracePeriod *time.Duration 
`json:"stop_grace_period,omitempty" yaml:"stop_grace_period,omitempty"` - Tmpfs []string `json:"tmpfs,omitempty" yaml:"tmpfs,omitempty"` - Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"` - User *int64 `json:"user,omitempty" yaml:"user,omitempty"` - Volumes []ServiceVolumeConfig `json:"volumes,omitempty" yaml:"volumes,omitempty"` - WorkingDir string `json:"working_dir,omitempty" yaml:"working_dir,omitempty"` + CapAdd []string `json:"cap_add,omitempty"` + CapDrop []string `json:"cap_drop,omitempty"` + Command []string `json:"command,omitempty"` + Configs []ServiceConfigObjConfig `json:"configs,omitempty"` + Deploy DeployConfig `json:"deploy,omitempty"` + Entrypoint []string `json:"entrypoint,omitempty"` + Environment map[string]*string `json:"environment,omitempty"` + ExtraHosts []string `json:"extra_hosts,omitempty"` + Hostname string `json:"hostname,omitempty"` + HealthCheck *HealthCheckConfig `json:"health_check,omitempty"` + Image string `json:"image,omitempty"` + Ipc string `json:"ipc,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Pid string `json:"pid,omitempty"` + Ports []ServicePortConfig `json:"ports,omitempty"` + Privileged bool `json:"privileged,omitempty"` + ReadOnly bool `json:"read_only,omitempty"` + Secrets []ServiceSecretConfig `json:"secrets,omitempty"` + StdinOpen bool `json:"stdin_open,omitempty"` + StopGracePeriod *time.Duration `json:"stop_grace_period,omitempty"` + Tmpfs []string `json:"tmpfs,omitempty"` + Tty bool `json:"tty,omitempty"` + User *int64 `json:"user,omitempty"` + Volumes []ServiceVolumeConfig `json:"volumes,omitempty"` + WorkingDir string `json:"working_dir,omitempty"` } // ServicePortConfig is the port configuration for a service type ServicePortConfig struct { - Mode string `json:"mode,omitempty" yaml:"mode,omitempty"` - Target uint32 `json:"target,omitempty" yaml:"target,omitempty"` - Published uint32 `json:"published,omitempty" yaml:"published,omitempty"` - Protocol string `json:"protocol,omitempty" yaml:"protocol,omitempty"` + Mode string `json:"mode,omitempty"` + Target uint32 `json:"target,omitempty"` + Published uint32 `json:"published,omitempty"` + Protocol string `json:"protocol,omitempty"` } // FileObjectConfig is a config type for a file used by a service type FileObjectConfig struct { - Name string `json:"name,omitempty" yaml:"name,omitempty"` - File string `json:"file,omitempty" yaml:"file,omitempty"` - External External `json:"external,omitempty" yaml:"external,omitempty"` - Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` + Name string `json:"name,omitempty"` + File string `json:"file,omitempty"` + External External `json:"external,omitempty"` + Labels map[string]string `json:"labels,omitempty"` } // SecretConfig for a secret @@ -123,17 +123,17 @@ type ConfigObjConfig FileObjectConfig // not managed, and should already exist. 
// External.name is deprecated and replaced by Volume.name type External struct { - Name string `json:"name,omitempty" yaml:"name,omitempty"` - External bool `json:"external,omitempty" yaml:"external,omitempty"` + Name string `json:"name,omitempty"` + External bool `json:"external,omitempty"` } // FileReferenceConfig for a reference to a swarm file object type FileReferenceConfig struct { - Source string `json:"source,omitempty" yaml:"source,omitempty"` - Target string `json:"target,omitempty" yaml:"target,omitempty"` - UID string `json:"uid,omitempty" yaml:"uid,omitempty"` - GID string `json:"gid,omitempty" yaml:"gid,omitempty"` - Mode *uint32 `json:"mode,omitempty" yaml:"mode,omitempty"` + Source string `json:"source,omitempty"` + Target string `json:"target,omitempty"` + UID string `json:"uid,omitempty"` + GID string `json:"gid,omitempty"` + Mode *uint32 `json:"mode,omitempty"` } // ServiceConfigObjConfig is the config obj configuration for a service @@ -144,40 +144,40 @@ type ServiceSecretConfig FileReferenceConfig // DeployConfig is the deployment configuration for a service type DeployConfig struct { - Mode string `json:"mode,omitempty" yaml:"mode,omitempty"` - Replicas *uint64 `json:"replicas,omitempty" yaml:"replicas,omitempty"` - Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - UpdateConfig *UpdateConfig `json:"update_config,omitempty" yaml:"update_config,omitempty"` - Resources Resources `json:"resources,omitempty" yaml:"resources,omitempty"` - RestartPolicy *RestartPolicy `json:"restart_policy,omitempty" yaml:"restart_policy,omitempty"` - Placement Placement `json:"placement,omitempty" yaml:"placement,omitempty"` + Mode string `json:"mode,omitempty"` + Replicas *uint64 `json:"replicas,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + UpdateConfig *UpdateConfig `json:"update_config,omitempty"` + Resources Resources `json:"resources,omitempty"` + RestartPolicy *RestartPolicy `json:"restart_policy,omitempty"` + Placement Placement `json:"placement,omitempty"` } // UpdateConfig is the service update configuration type UpdateConfig struct { - Parallelism *uint64 `json:"paralellism,omitempty" yaml:"paralellism,omitempty"` + Parallelism *uint64 `json:"paralellism,omitempty"` } // Resources the resource limits and reservations type Resources struct { - Limits *Resource `json:"limits,omitempty" yaml:"limits,omitempty"` - Reservations *Resource `json:"reservations,omitempty" yaml:"reservations,omitempty"` + Limits *Resource `json:"limits,omitempty"` + Reservations *Resource `json:"reservations,omitempty"` } // Resource is a resource to be limited or reserved type Resource struct { - NanoCPUs string `json:"cpus,omitempty" yaml:"cpus,omitempty"` - MemoryBytes int64 `json:"memory,omitempty" yaml:"memory,omitempty"` + NanoCPUs string `json:"cpus,omitempty"` + MemoryBytes int64 `json:"memory,omitempty"` } // RestartPolicy is the service restart policy type RestartPolicy struct { - Condition string `json:"condition,omitempty" yaml:"condition,omitempty"` + Condition string `json:"condition,omitempty"` } // Placement constraints for the service type Placement struct { - Constraints *Constraints `json:"constraints,omitempty" yaml:"constraints,omitempty"` + Constraints *Constraints `json:"constraints,omitempty"` } // Constraints lists constraints that can be set on the service @@ -196,18 +196,18 @@ type Constraint struct { // HealthCheckConfig the healthcheck configuration for a service type HealthCheckConfig struct { - Test []string 
`json:"test,omitempty" yaml:"test,omitempty"` - Timeout *time.Duration `json:"timeout,omitempty" yaml:"timeout,omitempty"` - Interval *time.Duration `json:"interval,omitempty" yaml:"interval,omitempty"` - Retries *uint64 `json:"retries,omitempty" yaml:"retries,omitempty"` + Test []string `json:"test,omitempty"` + Timeout *time.Duration `json:"timeout,omitempty"` + Interval *time.Duration `json:"interval,omitempty"` + Retries *uint64 `json:"retries,omitempty"` } // ServiceVolumeConfig are references to a volume used by a service type ServiceVolumeConfig struct { - Type string `json:"type,omitempty" yaml:"type,omitempty"` - Source string `json:"source,omitempty" yaml:"source,omitempty"` - Target string `json:"target,omitempty" yaml:"target,omitempty"` - ReadOnly bool `json:"read_only,omitempty" yaml:"read_only,omitempty"` + Type string `json:"type,omitempty"` + Source string `json:"source,omitempty"` + Target string `json:"target,omitempty"` + ReadOnly bool `json:"read_only,omitempty"` } func (s *StackSpec) clone() *StackSpec { diff --git a/vendor/github.com/docker/cli/kubernetes/config.go b/vendor/github.com/docker/cli/kubernetes/config.go index 3a9fdfc6d..287a6a6bf 100644 --- a/vendor/github.com/docker/cli/kubernetes/config.go +++ b/vendor/github.com/docker/cli/kubernetes/config.go @@ -5,14 +5,13 @@ import ( "path/filepath" "github.com/docker/docker/pkg/homedir" - restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) -// NewKubernetesConfig resolves the path to the desired Kubernetes configuration file, depending -// environment variable and command line flag. -func NewKubernetesConfig(configFlag string) (*restclient.Config, error) { - kubeConfig := configFlag +// NewKubernetesConfig resolves the path to the desired Kubernetes configuration file based on +// the KUBECONFIG environment variable and command line flags. +func NewKubernetesConfig(configPath string) clientcmd.ClientConfig { + kubeConfig := configPath if kubeConfig == "" { if config := os.Getenv("KUBECONFIG"); config != "" { kubeConfig = config @@ -20,5 +19,8 @@ func NewKubernetesConfig(configFlag string) (*restclient.Config, error) { kubeConfig = filepath.Join(homedir.Get(), ".kube/config") } } - return clientcmd.BuildConfigFromFlags("", kubeConfig) + + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfig}, + &clientcmd.ConfigOverrides{}) } diff --git a/vendor/github.com/docker/cli/opts/opts.go b/vendor/github.com/docker/cli/opts/opts.go index d915ec2f7..51519e03b 100644 --- a/vendor/github.com/docker/cli/opts/opts.go +++ b/vendor/github.com/docker/cli/opts/opts.go @@ -6,6 +6,7 @@ import ( "net" "path" "regexp" + "strconv" "strings" "github.com/docker/docker/api/types/filters" @@ -486,3 +487,38 @@ func (m *MemSwapBytes) UnmarshalJSON(s []byte) error { b := MemBytes(*m) return b.UnmarshalJSON(s) } + +// NullableBool is a type for tri-state boolean options +type NullableBool struct { + b *bool +} + +// Type returns the type +func (n *NullableBool) Type() string { + return "" +} + +// Value returns the value in *bool +func (n *NullableBool) Value() *bool { + return n.b +} + +// Set sets the value. If value is empty string or "auto", nil is set. +// Otherwise true or false are set based on flag.Bool behavior. 
+func (n *NullableBool) Set(value string) error { + if value != "auto" && value != "" { + b, err := strconv.ParseBool(value) + if err != nil { + return err + } + n.b = &b + } + return nil +} + +func (n *NullableBool) String() string { + if n.b == nil { + return "auto" + } + return strconv.FormatBool(*n.b) +} diff --git a/vendor/github.com/docker/cli/opts/port.go b/vendor/github.com/docker/cli/opts/port.go index 5a5a18738..201aefafc 100644 --- a/vendor/github.com/docker/cli/opts/port.go +++ b/vendor/github.com/docker/cli/opts/port.go @@ -9,6 +9,7 @@ import ( "github.com/docker/docker/api/types/swarm" "github.com/docker/go-connections/nat" + "github.com/sirupsen/logrus" ) const ( @@ -147,6 +148,9 @@ func ConvertPortToPortConfig( ports := []swarm.PortConfig{} for _, binding := range portBindings[port] { + if binding.HostIP != "" && binding.HostIP != "0.0.0.0" { + logrus.Warnf("ignoring IP-address (%s:%s:%s) service will listen on '0.0.0.0'", binding.HostIP, binding.HostPort, port) + } hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16) if err != nil && binding.HostPort != "" { return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port()) diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go index 01d309029..145b07853 100644 --- a/vendor/github.com/docker/distribution/blobs.go +++ b/vendor/github.com/docker/distribution/blobs.go @@ -1,13 +1,13 @@ package distribution import ( + "context" "errors" "fmt" "io" "net/http" "time" - "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/opencontainers/go-digest" ) diff --git a/vendor/github.com/docker/distribution/context/context.go b/vendor/github.com/docker/distribution/context/context.go deleted file mode 100644 index 23cbf5b54..000000000 --- a/vendor/github.com/docker/distribution/context/context.go +++ /dev/null @@ -1,85 +0,0 @@ -package context - -import ( - "sync" - - "github.com/docker/distribution/uuid" - "golang.org/x/net/context" -) - -// Context is a copy of Context from the golang.org/x/net/context package. -type Context interface { - context.Context -} - -// instanceContext is a context that provides only an instance id. It is -// provided as the main background context. -type instanceContext struct { - Context - id string // id of context, logged as "instance.id" - once sync.Once // once protect generation of the id -} - -func (ic *instanceContext) Value(key interface{}) interface{} { - if key == "instance.id" { - ic.once.Do(func() { - // We want to lazy initialize the UUID such that we don't - // call a random generator from the package initialization - // code. For various reasons random could not be available - // https://github.com/docker/distribution/issues/782 - ic.id = uuid.Generate().String() - }) - return ic.id - } - - return ic.Context.Value(key) -} - -var background = &instanceContext{ - Context: context.Background(), -} - -// Background returns a non-nil, empty Context. The background context -// provides a single key, "instance.id" that is globally unique to the -// process. -func Background() Context { - return background -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. Use context Values only for request-scoped data that transits processes -// and APIs, not for passing optional parameters to functions. 
-func WithValue(parent Context, key, val interface{}) Context { - return context.WithValue(parent, key, val) -} - -// stringMapContext is a simple context implementation that checks a map for a -// key, falling back to a parent if not present. -type stringMapContext struct { - context.Context - m map[string]interface{} -} - -// WithValues returns a context that proxies lookups through a map. Only -// supports string keys. -func WithValues(ctx context.Context, m map[string]interface{}) context.Context { - mo := make(map[string]interface{}, len(m)) // make our own copy. - for k, v := range m { - mo[k] = v - } - - return stringMapContext{ - Context: ctx, - m: mo, - } -} - -func (smc stringMapContext) Value(key interface{}) interface{} { - if ks, ok := key.(string); ok { - if v, ok := smc.m[ks]; ok { - return v - } - } - - return smc.Context.Value(key) -} diff --git a/vendor/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go deleted file mode 100644 index 9b623074e..000000000 --- a/vendor/github.com/docker/distribution/context/doc.go +++ /dev/null @@ -1,89 +0,0 @@ -// Package context provides several utilities for working with -// golang.org/x/net/context in http requests. Primarily, the focus is on -// logging relevant request information but this package is not limited to -// that purpose. -// -// The easiest way to get started is to get the background context: -// -// ctx := context.Background() -// -// The returned context should be passed around your application and be the -// root of all other context instances. If the application has a version, this -// line should be called before anything else: -// -// ctx := context.WithVersion(context.Background(), version) -// -// The above will store the version in the context and will be available to -// the logger. -// -// Logging -// -// The most useful aspect of this package is GetLogger. This function takes -// any context.Context interface and returns the current logger from the -// context. Canonical usage looks like this: -// -// GetLogger(ctx).Infof("something interesting happened") -// -// GetLogger also takes optional key arguments. The keys will be looked up in -// the context and reported with the logger. The following example would -// return a logger that prints the version with each log message: -// -// ctx := context.Context(context.Background(), "version", version) -// GetLogger(ctx, "version").Infof("this log message has a version field") -// -// The above would print out a log message like this: -// -// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m -// -// When used with WithLogger, we gain the ability to decorate the context with -// loggers that have information from disparate parts of the call stack. -// Following from the version example, we can build a new context with the -// configured logger such that we always print the version field: -// -// ctx = WithLogger(ctx, GetLogger(ctx, "version")) -// -// Since the logger has been pushed to the context, we can now get the version -// field for free with our log messages. Future calls to GetLogger on the new -// context will have the version field: -// -// GetLogger(ctx).Infof("this log message has a version field") -// -// This becomes more powerful when we start stacking loggers. Let's say we -// have the version logger from above but also want a request id. 
Using the -// context above, in our request scoped function, we place another logger in -// the context: -// -// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context -// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id")) -// -// When GetLogger is called on the new context, "http.request.id" will be -// included as a logger field, along with the original "version" field: -// -// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m -// -// Note that this only affects the new context, the previous context, with the -// version field, can be used independently. Put another way, the new logger, -// added to the request context, is unique to that context and can have -// request scoped variables. -// -// HTTP Requests -// -// This package also contains several methods for working with http requests. -// The concepts are very similar to those described above. We simply place the -// request in the context using WithRequest. This makes the request variables -// available. GetRequestLogger can then be called to get request specific -// variables in a log line: -// -// ctx = WithRequest(ctx, req) -// GetRequestLogger(ctx).Infof("request variables") -// -// Like above, if we want to include the request data in all log messages in -// the context, we push the logger to a new context and use that one: -// -// ctx = WithLogger(ctx, GetRequestLogger(ctx)) -// -// The concept is fairly powerful and ensures that calls throughout the stack -// can be traced in log messages. Using the fields like "http.request.id", one -// can analyze call flow for a particular request with a simple grep of the -// logs. -package context diff --git a/vendor/github.com/docker/distribution/context/http.go b/vendor/github.com/docker/distribution/context/http.go deleted file mode 100644 index 7d65c8524..000000000 --- a/vendor/github.com/docker/distribution/context/http.go +++ /dev/null @@ -1,366 +0,0 @@ -package context - -import ( - "errors" - "net" - "net/http" - "strings" - "sync" - "time" - - "github.com/docker/distribution/uuid" - "github.com/gorilla/mux" - log "github.com/sirupsen/logrus" -) - -// Common errors used with this package. -var ( - ErrNoRequestContext = errors.New("no http request in context") - ErrNoResponseWriterContext = errors.New("no http response in context") -) - -func parseIP(ipStr string) net.IP { - ip := net.ParseIP(ipStr) - if ip == nil { - log.Warnf("invalid remote IP address: %q", ipStr) - } - return ip -} - -// RemoteAddr extracts the remote address of the request, taking into -// account proxy headers. -func RemoteAddr(r *http.Request) string { - if prior := r.Header.Get("X-Forwarded-For"); prior != "" { - proxies := strings.Split(prior, ",") - if len(proxies) > 0 { - remoteAddr := strings.Trim(proxies[0], " ") - if parseIP(remoteAddr) != nil { - return remoteAddr - } - } - } - // X-Real-Ip is less supported, but worth checking in the - // absence of X-Forwarded-For - if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { - if parseIP(realIP) != nil { - return realIP - } - } - - return r.RemoteAddr -} - -// RemoteIP extracts the remote IP of the request, taking into -// account proxy headers. -func RemoteIP(r *http.Request) string { - addr := RemoteAddr(r) - - // Try parsing it as "IP:port" - if ip, _, err := net.SplitHostPort(addr); err == nil { - return ip - } - - return addr -} - -// WithRequest places the request on the context. 
The context of the request -// is assigned a unique id, available at "http.request.id". The request itself -// is available at "http.request". Other common attributes are available under -// the prefix "http.request.". If a request is already present on the context, -// this method will panic. -func WithRequest(ctx Context, r *http.Request) Context { - if ctx.Value("http.request") != nil { - // NOTE(stevvooe): This needs to be considered a programming error. It - // is unlikely that we'd want to have more than one request in - // context. - panic("only one request per context") - } - - return &httpRequestContext{ - Context: ctx, - startedAt: time.Now(), - id: uuid.Generate().String(), - r: r, - } -} - -// GetRequest returns the http request in the given context. Returns -// ErrNoRequestContext if the context does not have an http request associated -// with it. -func GetRequest(ctx Context) (*http.Request, error) { - if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { - return r, nil - } - return nil, ErrNoRequestContext -} - -// GetRequestID attempts to resolve the current request id, if possible. An -// error is return if it is not available on the context. -func GetRequestID(ctx Context) string { - return GetStringValue(ctx, "http.request.id") -} - -// WithResponseWriter returns a new context and response writer that makes -// interesting response statistics available within the context. -func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { - if closeNotifier, ok := w.(http.CloseNotifier); ok { - irwCN := &instrumentedResponseWriterCN{ - instrumentedResponseWriter: instrumentedResponseWriter{ - ResponseWriter: w, - Context: ctx, - }, - CloseNotifier: closeNotifier, - } - - return irwCN, irwCN - } - - irw := instrumentedResponseWriter{ - ResponseWriter: w, - Context: ctx, - } - return &irw, &irw -} - -// GetResponseWriter returns the http.ResponseWriter from the provided -// context. If not present, ErrNoResponseWriterContext is returned. The -// returned instance provides instrumentation in the context. -func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { - v := ctx.Value("http.response") - - rw, ok := v.(http.ResponseWriter) - if !ok || rw == nil { - return nil, ErrNoResponseWriterContext - } - - return rw, nil -} - -// getVarsFromRequest let's us change request vars implementation for testing -// and maybe future changes. -var getVarsFromRequest = mux.Vars - -// WithVars extracts gorilla/mux vars and makes them available on the returned -// context. Variables are available at keys with the prefix "vars.". For -// example, if looking for the variable "name", it can be accessed as -// "vars.name". Implementations that are accessing values need not know that -// the underlying context is implemented with gorilla/mux vars. -func WithVars(ctx Context, r *http.Request) Context { - return &muxVarsContext{ - Context: ctx, - vars: getVarsFromRequest(r), - } -} - -// GetRequestLogger returns a logger that contains fields from the request in -// the current context. If the request is not available in the context, no -// fields will display. Request loggers can safely be pushed onto the context. 
-func GetRequestLogger(ctx Context) Logger { - return GetLogger(ctx, - "http.request.id", - "http.request.method", - "http.request.host", - "http.request.uri", - "http.request.referer", - "http.request.useragent", - "http.request.remoteaddr", - "http.request.contenttype") -} - -// GetResponseLogger reads the current response stats and builds a logger. -// Because the values are read at call time, pushing a logger returned from -// this function on the context will lead to missing or invalid data. Only -// call this at the end of a request, after the response has been written. -func GetResponseLogger(ctx Context) Logger { - l := getLogrusLogger(ctx, - "http.response.written", - "http.response.status", - "http.response.contenttype") - - duration := Since(ctx, "http.request.startedat") - - if duration > 0 { - l = l.WithField("http.response.duration", duration.String()) - } - - return l -} - -// httpRequestContext makes information about a request available to context. -type httpRequestContext struct { - Context - - startedAt time.Time - id string - r *http.Request -} - -// Value returns a keyed element of the request for use in the context. To get -// the request itself, query "request". For other components, access them as -// "request.". For example, r.RequestURI -func (ctx *httpRequestContext) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.request" { - return ctx.r - } - - if !strings.HasPrefix(keyStr, "http.request.") { - goto fallback - } - - parts := strings.Split(keyStr, ".") - - if len(parts) != 3 { - goto fallback - } - - switch parts[2] { - case "uri": - return ctx.r.RequestURI - case "remoteaddr": - return RemoteAddr(ctx.r) - case "method": - return ctx.r.Method - case "host": - return ctx.r.Host - case "referer": - referer := ctx.r.Referer() - if referer != "" { - return referer - } - case "useragent": - return ctx.r.UserAgent() - case "id": - return ctx.id - case "startedat": - return ctx.startedAt - case "contenttype": - ct := ctx.r.Header.Get("Content-Type") - if ct != "" { - return ct - } - } - } - -fallback: - return ctx.Context.Value(key) -} - -type muxVarsContext struct { - Context - vars map[string]string -} - -func (ctx *muxVarsContext) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "vars" { - return ctx.vars - } - - if strings.HasPrefix(keyStr, "vars.") { - keyStr = strings.TrimPrefix(keyStr, "vars.") - } - - if v, ok := ctx.vars[keyStr]; ok { - return v - } - } - - return ctx.Context.Value(key) -} - -// instrumentedResponseWriterCN provides response writer information in a -// context. It implements http.CloseNotifier so that users can detect -// early disconnects. -type instrumentedResponseWriterCN struct { - instrumentedResponseWriter - http.CloseNotifier -} - -// instrumentedResponseWriter provides response writer information in a -// context. This variant is only used in the case where CloseNotifier is not -// implemented by the parent ResponseWriter. -type instrumentedResponseWriter struct { - http.ResponseWriter - Context - - mu sync.Mutex - status int - written int64 -} - -func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { - n, err = irw.ResponseWriter.Write(p) - - irw.mu.Lock() - irw.written += int64(n) - - // Guess the likely status if not set. 
- if irw.status == 0 { - irw.status = http.StatusOK - } - - irw.mu.Unlock() - - return -} - -func (irw *instrumentedResponseWriter) WriteHeader(status int) { - irw.ResponseWriter.WriteHeader(status) - - irw.mu.Lock() - irw.status = status - irw.mu.Unlock() -} - -func (irw *instrumentedResponseWriter) Flush() { - if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { - flusher.Flush() - } -} - -func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.response" { - return irw - } - - if !strings.HasPrefix(keyStr, "http.response.") { - goto fallback - } - - parts := strings.Split(keyStr, ".") - - if len(parts) != 3 { - goto fallback - } - - irw.mu.Lock() - defer irw.mu.Unlock() - - switch parts[2] { - case "written": - return irw.written - case "status": - return irw.status - case "contenttype": - contentType := irw.Header().Get("Content-Type") - if contentType != "" { - return contentType - } - } - } - -fallback: - return irw.Context.Value(key) -} - -func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.response" { - return irw - } - } - - return irw.instrumentedResponseWriter.Value(key) -} diff --git a/vendor/github.com/docker/distribution/context/logger.go b/vendor/github.com/docker/distribution/context/logger.go deleted file mode 100644 index 86c5964e4..000000000 --- a/vendor/github.com/docker/distribution/context/logger.go +++ /dev/null @@ -1,116 +0,0 @@ -package context - -import ( - "fmt" - - "github.com/sirupsen/logrus" - "runtime" -) - -// Logger provides a leveled-logging interface. -type Logger interface { - // standard logger methods - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) - - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - - Panic(args ...interface{}) - Panicf(format string, args ...interface{}) - Panicln(args ...interface{}) - - // Leveled methods, from logrus - Debug(args ...interface{}) - Debugf(format string, args ...interface{}) - Debugln(args ...interface{}) - - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Errorln(args ...interface{}) - - Info(args ...interface{}) - Infof(format string, args ...interface{}) - Infoln(args ...interface{}) - - Warn(args ...interface{}) - Warnf(format string, args ...interface{}) - Warnln(args ...interface{}) -} - -// WithLogger creates a new context with provided logger. -func WithLogger(ctx Context, logger Logger) Context { - return WithValue(ctx, "logger", logger) -} - -// GetLoggerWithField returns a logger instance with the specified field key -// and value without affecting the context. Extra specified keys will be -// resolved from the context. -func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { - return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) -} - -// GetLoggerWithFields returns a logger instance with the specified fields -// without affecting the context. Extra specified keys will be resolved from -// the context. -func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { - // must convert from interface{} -> interface{} to string -> interface{} for logrus. 
- lfields := make(logrus.Fields, len(fields)) - for key, value := range fields { - lfields[fmt.Sprint(key)] = value - } - - return getLogrusLogger(ctx, keys...).WithFields(lfields) -} - -// GetLogger returns the logger from the current context, if present. If one -// or more keys are provided, they will be resolved on the context and -// included in the logger. While context.Value takes an interface, any key -// argument passed to GetLogger will be passed to fmt.Sprint when expanded as -// a logging key field. If context keys are integer constants, for example, -// its recommended that a String method is implemented. -func GetLogger(ctx Context, keys ...interface{}) Logger { - return getLogrusLogger(ctx, keys...) -} - -// GetLogrusLogger returns the logrus logger for the context. If one more keys -// are provided, they will be resolved on the context and included in the -// logger. Only use this function if specific logrus functionality is -// required. -func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { - var logger *logrus.Entry - - // Get a logger, if it is present. - loggerInterface := ctx.Value("logger") - if loggerInterface != nil { - if lgr, ok := loggerInterface.(*logrus.Entry); ok { - logger = lgr - } - } - - if logger == nil { - fields := logrus.Fields{} - - // Fill in the instance id, if we have it. - instanceID := ctx.Value("instance.id") - if instanceID != nil { - fields["instance.id"] = instanceID - } - - fields["go.version"] = runtime.Version() - // If no logger is found, just return the standard logger. - logger = logrus.StandardLogger().WithFields(fields) - } - - fields := logrus.Fields{} - for _, key := range keys { - v := ctx.Value(key) - if v != nil { - fields[fmt.Sprint(key)] = v - } - } - - return logger.WithFields(fields) -} diff --git a/vendor/github.com/docker/distribution/context/trace.go b/vendor/github.com/docker/distribution/context/trace.go deleted file mode 100644 index 721964a84..000000000 --- a/vendor/github.com/docker/distribution/context/trace.go +++ /dev/null @@ -1,104 +0,0 @@ -package context - -import ( - "runtime" - "time" - - "github.com/docker/distribution/uuid" -) - -// WithTrace allocates a traced timing span in a new context. This allows a -// caller to track the time between calling WithTrace and the returned done -// function. When the done function is called, a log message is emitted with a -// "trace.duration" field, corresponding to the elapsed time and a -// "trace.func" field, corresponding to the function that called WithTrace. -// -// The logging keys "trace.id" and "trace.parent.id" are provided to implement -// dapper-like tracing. This function should be complemented with a WithSpan -// method that could be used for tracing distributed RPC calls. -// -// The main benefit of this function is to post-process log messages or -// intercept them in a hook to provide timing data. Trace ids and parent ids -// can also be linked to provide call tracing, if so required. -// -// Here is an example of the usage: -// -// func timedOperation(ctx Context) { -// ctx, done := WithTrace(ctx) -// defer done("this will be the log message") -// // ... function body ... -// } -// -// If the function ran for roughly 1s, such a usage would emit a log message -// as follows: -// -// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... 
-// -// Notice that the function name is automatically resolved, along with the -// package and a trace id is emitted that can be linked with parent ids. -func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { - if ctx == nil { - ctx = Background() - } - - pc, file, line, _ := runtime.Caller(1) - f := runtime.FuncForPC(pc) - ctx = &traced{ - Context: ctx, - id: uuid.Generate().String(), - start: time.Now(), - parent: GetStringValue(ctx, "trace.id"), - fnname: f.Name(), - file: file, - line: line, - } - - return ctx, func(format string, a ...interface{}) { - GetLogger(ctx, - "trace.duration", - "trace.id", - "trace.parent.id", - "trace.func", - "trace.file", - "trace.line"). - Debugf(format, a...) - } -} - -// traced represents a context that is traced for function call timing. It -// also provides fast lookup for the various attributes that are available on -// the trace. -type traced struct { - Context - id string - parent string - start time.Time - fnname string - file string - line int -} - -func (ts *traced) Value(key interface{}) interface{} { - switch key { - case "trace.start": - return ts.start - case "trace.duration": - return time.Since(ts.start) - case "trace.id": - return ts.id - case "trace.parent.id": - if ts.parent == "" { - return nil // must return nil to signal no parent. - } - - return ts.parent - case "trace.func": - return ts.fnname - case "trace.file": - return ts.file - case "trace.line": - return ts.line - } - - return ts.Context.Value(key) -} diff --git a/vendor/github.com/docker/distribution/context/util.go b/vendor/github.com/docker/distribution/context/util.go deleted file mode 100644 index cb9ef52e3..000000000 --- a/vendor/github.com/docker/distribution/context/util.go +++ /dev/null @@ -1,24 +0,0 @@ -package context - -import ( - "time" -) - -// Since looks up key, which should be a time.Time, and returns the duration -// since that time. If the key is not found, the value returned will be zero. -// This is helpful when inferring metrics related to context execution times. -func Since(ctx Context, key interface{}) time.Duration { - if startedAt, ok := ctx.Value(key).(time.Time); ok { - return time.Since(startedAt) - } - return 0 -} - -// GetStringValue returns a string value from the context. The empty string -// will be returned if not found. -func GetStringValue(ctx Context, key interface{}) (value string) { - if valuev, ok := ctx.Value(key).(string); ok { - value = valuev - } - return value -} diff --git a/vendor/github.com/docker/distribution/context/version.go b/vendor/github.com/docker/distribution/context/version.go deleted file mode 100644 index 746cda02e..000000000 --- a/vendor/github.com/docker/distribution/context/version.go +++ /dev/null @@ -1,16 +0,0 @@ -package context - -// WithVersion stores the application version in the context. The new context -// gets a logger to ensure log messages are marked with the application -// version. -func WithVersion(ctx Context, version string) Context { - ctx = WithValue(ctx, "version", version) - // push a new logger onto the stack - return WithLogger(ctx, GetLogger(ctx, "version")) -} - -// GetVersion returns the application version from the context. An empty -// string may returned if the version was not set on the context. 
-func GetVersion(ctx Context) string { - return GetStringValue(ctx, "version") -} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/github.com/docker/distribution/manifest/schema2/builder.go index 4b6ba5628..3facaae62 100644 --- a/vendor/github.com/docker/distribution/manifest/schema2/builder.go +++ b/vendor/github.com/docker/distribution/manifest/schema2/builder.go @@ -1,8 +1,9 @@ package schema2 import ( + "context" + "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/opencontainers/go-digest" ) diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go index 2c99f25d3..1816baea1 100644 --- a/vendor/github.com/docker/distribution/manifests.go +++ b/vendor/github.com/docker/distribution/manifests.go @@ -1,10 +1,10 @@ package distribution import ( + "context" "fmt" "mime" - "github.com/docker/distribution/context" "github.com/opencontainers/go-digest" ) diff --git a/vendor/github.com/docker/distribution/metrics/prometheus.go b/vendor/github.com/docker/distribution/metrics/prometheus.go new file mode 100644 index 000000000..b5a532144 --- /dev/null +++ b/vendor/github.com/docker/distribution/metrics/prometheus.go @@ -0,0 +1,13 @@ +package metrics + +import "github.com/docker/go-metrics" + +const ( + // NamespacePrefix is the namespace of prometheus metrics + NamespacePrefix = "registry" +) + +var ( + // StorageNamespace is the prometheus namespace of blob/cache related operations + StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil) +) diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go index 1da1d533f..a3a80ab88 100644 --- a/vendor/github.com/docker/distribution/registry.go +++ b/vendor/github.com/docker/distribution/registry.go @@ -1,7 +1,8 @@ package distribution import ( - "github.com/docker/distribution/context" + "context" + "github.com/docker/distribution/reference" ) @@ -72,6 +73,21 @@ func (o WithTagOption) Apply(m ManifestService) error { return nil } +// WithManifestMediaTypes lists the media types the client wishes +// the server to provide. +func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption { + return WithManifestMediaTypesOption{mediaTypes} +} + +// WithManifestMediaTypesOption holds a list of accepted media types +type WithManifestMediaTypesOption struct{ MediaTypes []string } + +// Apply conforms to the ManifestServiceOption interface +func (o WithManifestMediaTypesOption) Apply(m ManifestService) error { + // no implementation + return nil +} + // Repository is a named collection of manifests and layers. type Repository interface { // Named returns the name of the repository. 
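A minimal usage sketch for the WithManifestMediaTypes option introduced in the registry.go hunk above (illustrative only, not part of the vendored change; the helper name and the assumption that a distribution.ManifestService is already at hand are mine):

package main // hypothetical wrapper, only to keep the sketch self-contained

import (
	"context"

	"github.com/docker/distribution"
	"github.com/opencontainers/go-digest"
)

// fetchSchema2Manifest narrows the Accept header to schema2 manifests.
// When no option is passed, the client falls back to advertising the full
// distribution.ManifestMediaTypes() list, as the repository.go hunk further below shows.
func fetchSchema2Manifest(ctx context.Context, ms distribution.ManifestService, dgst digest.Digest) (distribution.Manifest, error) {
	return ms.Get(ctx, dgst, distribution.WithManifestMediaTypes([]string{
		"application/vnd.docker.distribution.manifest.v2+json",
	}))
}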
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go index 49a64a86e..d77e70473 100644 --- a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go +++ b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go @@ -36,9 +36,5 @@ func ServeJSON(w http.ResponseWriter, err error) error { w.WriteHeader(sc) - if err := json.NewEncoder(w).Encode(err); err != nil { - return err - } - - return nil + return json.NewEncoder(w).Encode(err) } diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go index c9bdfc355..6e3f1ccc4 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go @@ -45,13 +45,13 @@ type Manager interface { // to a backend. func NewSimpleManager() Manager { return &simpleManager{ - Challanges: make(map[string][]Challenge), + Challenges: make(map[string][]Challenge), } } type simpleManager struct { sync.RWMutex - Challanges map[string][]Challenge + Challenges map[string][]Challenge } func normalizeURL(endpoint *url.URL) { @@ -64,7 +64,7 @@ func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { m.RLock() defer m.RUnlock() - challenges := m.Challanges[endpoint.String()] + challenges := m.Challenges[endpoint.String()] return challenges, nil } @@ -82,7 +82,7 @@ func (m *simpleManager) AddResponse(resp *http.Response) error { m.Lock() defer m.Unlock() - m.Challanges[urlCopy.String()] = challenges + m.Challenges[urlCopy.String()] = challenges return nil } diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go index be474d825..db86c9b06 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/session.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/session.go @@ -13,7 +13,6 @@ import ( "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/client/transport" - "github.com/sirupsen/logrus" ) var ( @@ -135,6 +134,8 @@ type tokenHandler struct { tokenLock sync.Mutex tokenCache string tokenExpiration time.Time + + logger Logger } // Scope is a type which is serializable to a string @@ -176,6 +177,18 @@ func (rs RegistryScope) String() string { return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ",")) } +// Logger defines the injectable logging interface, used on TokenHandlers. +type Logger interface { + Debugf(format string, args ...interface{}) +} + +func logDebugf(logger Logger, format string, args ...interface{}) { + if logger == nil { + return + } + logger.Debugf(format, args...) +} + // TokenHandlerOptions is used to configure a new token handler type TokenHandlerOptions struct { Transport http.RoundTripper @@ -185,6 +198,7 @@ type TokenHandlerOptions struct { ForceOAuth bool ClientID string Scopes []Scope + Logger Logger } // An implementation of clock for providing real time data. 
@@ -220,6 +234,7 @@ func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandl clientID: options.ClientID, scopes: options.Scopes, clock: realClock{}, + logger: options.Logger, } return handler @@ -264,6 +279,9 @@ func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...s } var addedScopes bool for _, scope := range additionalScopes { + if hasScope(scopes, scope) { + continue + } scopes = append(scopes, scope) addedScopes = true } @@ -287,6 +305,15 @@ func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...s return th.tokenCache, nil } +func hasScope(scopes []string, scope string) bool { + for _, s := range scopes { + if s == scope { + return true + } + } + return false +} + type postTokenResponse struct { AccessToken string `json:"access_token"` RefreshToken string `json:"refresh_token"` @@ -348,7 +375,7 @@ func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, servic if tr.ExpiresIn < minimumTokenLifetimeSeconds { // The default/minimum lifetime. tr.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) } if tr.IssuedAt.IsZero() { @@ -439,7 +466,7 @@ func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, if tr.ExpiresIn < minimumTokenLifetimeSeconds { // The default/minimum lifetime. tr.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) } if tr.IssuedAt.IsZero() { diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go index e3ffcb00f..695bf852f 100644 --- a/vendor/github.com/docker/distribution/registry/client/blob_writer.go +++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go @@ -2,6 +2,7 @@ package client import ( "bytes" + "context" "fmt" "io" "io/ioutil" @@ -9,7 +10,6 @@ import ( "time" "github.com/docker/distribution" - "github.com/docker/distribution/context" ) type httpBlobUpload struct { diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go index b82a968e2..d8e2c795d 100644 --- a/vendor/github.com/docker/distribution/registry/client/repository.go +++ b/vendor/github.com/docker/distribution/registry/client/repository.go @@ -2,6 +2,7 @@ package client import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -14,7 +15,6 @@ import ( "time" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" @@ -62,7 +62,7 @@ func checkHTTPRedirect(req *http.Request, via []*http.Request) error { } // NewRegistry creates a registry namespace which can be used to get a listing of repositories -func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { +func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) { ub, err := v2.NewURLBuilderFromString(baseURL, false) if err != nil { return nil, err @@ -75,9 +75,8 @@ func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTrippe } return &registry{ - client: client, - ub: ub, - context: ctx,
+ client: client, + ub: ub, }, nil } @@ -133,7 +132,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri } // NewRepository creates a new Repository for the given repository name and base URL. -func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { +func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { ub, err := v2.NewURLBuilderFromString(baseURL, false) if err != nil { return nil, err @@ -146,10 +145,9 @@ func NewRepository(ctx context.Context, name reference.Named, baseURL string, tr } return &repository{ - client: client, - ub: ub, - name: name, - context: ctx, + client: client, + ub: ub, + name: name, }, nil } @@ -190,32 +188,35 @@ func (r *repository) Manifests(ctx context.Context, options ...distribution.Mani func (r *repository) Tags(ctx context.Context) distribution.TagService { return &tags{ - client: r.client, - ub: r.ub, - context: r.context, - name: r.Named(), + client: r.client, + ub: r.ub, + name: r.Named(), } } // tags implements remote tagging operations. type tags struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named + client *http.Client + ub *v2.URLBuilder + name reference.Named } // All returns all tags func (t *tags) All(ctx context.Context) ([]string, error) { var tags []string - u, err := t.ub.BuildTagsURL(t.name) + listURLStr, err := t.ub.BuildTagsURL(t.name) + if err != nil { + return tags, err + } + + listURL, err := url.Parse(listURLStr) if err != nil { return tags, err } for { - resp, err := t.client.Get(u) + resp, err := t.client.Get(listURL.String()) if err != nil { return tags, err } @@ -235,7 +236,13 @@ func (t *tags) All(ctx context.Context) ([]string, error) { } tags = append(tags, tagsResponse.Tags...) if link := resp.Header.Get("Link"); link != "" { - u = strings.Trim(strings.Split(link, ";")[0], "<>") + linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") + linkURL, err := url.Parse(linkURLStr) + if err != nil { + return tags, err + } + + listURL = listURL.ResolveReference(linkURL) } else { return tags, nil } @@ -321,7 +328,8 @@ func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, er defer resp.Body.Close() switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400: + case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0: + // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers return descriptorFromResponse(resp) default: // if the response is an error - there will be no body to decode. 
@@ -421,18 +429,22 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis ref reference.Named err error contentDgst *digest.Digest + mediaTypes []string ) for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { + switch opt := option.(type) { + case distribution.WithTagOption: digestOrTag = opt.Tag ref, err = reference.WithTag(ms.name, opt.Tag) if err != nil { return nil, err } - } else if opt, ok := option.(contentDigestOption); ok { + case contentDigestOption: contentDgst = opt.digest - } else { + case distribution.WithManifestMediaTypesOption: + mediaTypes = opt.MediaTypes + default: err := option.Apply(ms) if err != nil { return nil, err @@ -448,6 +460,10 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis } } + if len(mediaTypes) == 0 { + mediaTypes = distribution.ManifestMediaTypes() + } + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return nil, err @@ -458,7 +474,7 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis return nil, err } - for _, t := range distribution.ManifestMediaTypes() { + for _, t := range mediaTypes { req.Header.Add("Accept", t) } diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go index f647616bc..ac4c45211 100644 --- a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go @@ -1,10 +1,11 @@ package cache import ( - "github.com/docker/distribution/context" - "github.com/opencontainers/go-digest" + "context" "github.com/docker/distribution" + prometheus "github.com/docker/distribution/metrics" + "github.com/opencontainers/go-digest" ) // Metrics is used to hold metric counters @@ -16,12 +17,20 @@ type Metrics struct { Misses uint64 } +// Logger can be provided on the MetricsTracker to log errors. +// +// Usually, this is just a proxy to dcontext.GetLogger. +type Logger interface { + Errorf(format string, args ...interface{}) +} + // MetricsTracker represents a metric tracker // which simply counts the number of hits and misses. type MetricsTracker interface { Hit() Miss() Metrics() Metrics + Logger(context.Context) Logger } type cachedBlobStatter struct { @@ -30,6 +39,11 @@ type cachedBlobStatter struct { tracker MetricsTracker } +var ( + // cacheCount is the number of total cache request received/hits/misses + cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type") +) + // NewCachedBlobStatter creates a new statter which prefers a cache and // falls back to a backend. 
func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { @@ -50,20 +64,22 @@ func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, b } func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + cacheCount.WithValues("Request").Inc(1) desc, err := cbds.cache.Stat(ctx, dgst) if err != nil { if err != distribution.ErrBlobUnknown { - context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) + logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err) } goto fallback } - + cacheCount.WithValues("Hit").Inc(1) if cbds.tracker != nil { cbds.tracker.Hit() } return desc, nil fallback: + cacheCount.WithValues("Miss").Inc(1) if cbds.tracker != nil { cbds.tracker.Miss() } @@ -73,7 +89,7 @@ fallback: } if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) } return desc, err @@ -95,7 +111,19 @@ func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) er func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) } return nil } + +func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) { + if tracker == nil { + return + } + + logger := tracker.Logger(ctx) + if logger == nil { + return + } + logger.Errorf(format, args...) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go index b2fcaf4e8..42d94d9bd 100644 --- a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go +++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go @@ -1,10 +1,10 @@ package memory import ( + "context" "sync" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" "github.com/opencontainers/go-digest" diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go index 503056596..f22df2b85 100644 --- a/vendor/github.com/docker/distribution/tags.go +++ b/vendor/github.com/docker/distribution/tags.go @@ -1,7 +1,7 @@ package distribution import ( - "github.com/docker/distribution/context" + "context" ) // TagService provides access to information about tagged objects. diff --git a/vendor/github.com/docker/docker-credential-helpers/pass/pass_linux.go b/vendor/github.com/docker/docker-credential-helpers/pass/pass_linux.go deleted file mode 100644 index bb4de0780..000000000 --- a/vendor/github.com/docker/docker-credential-helpers/pass/pass_linux.go +++ /dev/null @@ -1,239 +0,0 @@ -// A `pass` based credential helper. Passwords are stored as arguments to pass -// of the form: "$PASS_FOLDER/base64-url(serverURL)/username". 
We base64-url -// encode the serverURL, because under the hood pass uses files and folders, so -// /s will get translated into additional folders. -package pass - -import ( - "encoding/base64" - "errors" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" - "strings" - - "github.com/docker/docker-credential-helpers/credentials" -) - -const PASS_FOLDER = "docker-credential-helpers" - -var ( - PassInitialized bool -) - -func init() { - // In principle, we could just run `pass init`. However, pass has a bug - // where if gpg fails, it doesn't always exit 1. Additionally, pass - // uses gpg2, but gpg is the default, which may be confusing. So let's - // just explictily check that pass actually can store and retreive a - // password. - password := "pass is initialized" - name := path.Join(PASS_FOLDER, "docker-pass-initialized-check") - - _, err := runPass(password, "insert", "-f", "-m", name) - if err != nil { - return - } - - stored, err := runPass("", "show", name) - PassInitialized = err == nil && stored == password - - if PassInitialized { - runPass("", "rm", "-rf", name) - } -} - -func runPass(stdinContent string, args ...string) (string, error) { - cmd := exec.Command("pass", args...) - - stdin, err := cmd.StdinPipe() - if err != nil { - return "", err - } - defer stdin.Close() - - stderr, err := cmd.StderrPipe() - if err != nil { - return "", err - } - defer stderr.Close() - - stdout, err := cmd.StdoutPipe() - if err != nil { - return "", err - } - defer stdout.Close() - - err = cmd.Start() - if err != nil { - return "", err - } - - _, err = stdin.Write([]byte(stdinContent)) - if err != nil { - return "", err - } - stdin.Close() - - errContent, err := ioutil.ReadAll(stderr) - if err != nil { - return "", fmt.Errorf("error reading stderr: %s", err) - } - - result, err := ioutil.ReadAll(stdout) - if err != nil { - return "", fmt.Errorf("Error reading stdout: %s", err) - } - - cmdErr := cmd.Wait() - if cmdErr != nil { - return "", fmt.Errorf("%s: %s", cmdErr, errContent) - } - - return string(result), nil -} - -// Pass handles secrets using Linux secret-service as a store. -type Pass struct{} - -// Add adds new credentials to the keychain. -func (h Pass) Add(creds *credentials.Credentials) error { - if !PassInitialized { - return errors.New("pass store is uninitialized") - } - - if creds == nil { - return errors.New("missing credentials") - } - - encoded := base64.URLEncoding.EncodeToString([]byte(creds.ServerURL)) - - _, err := runPass(creds.Secret, "insert", "-f", "-m", path.Join(PASS_FOLDER, encoded, creds.Username)) - return err -} - -// Delete removes credentials from the store. -func (h Pass) Delete(serverURL string) error { - if !PassInitialized { - return errors.New("pass store is uninitialized") - } - - if serverURL == "" { - return errors.New("missing server url") - } - - encoded := base64.URLEncoding.EncodeToString([]byte(serverURL)) - _, err := runPass("", "rm", "-rf", path.Join(PASS_FOLDER, encoded)) - return err -} - -func getPassDir() string { - passDir := os.ExpandEnv("$HOME/.password-store") - for _, e := range os.Environ() { - parts := strings.SplitN(e, "=", 2) - if len(parts) < 2 { - continue - } - - if parts[0] != "PASSWORD_STORE_DIR" { - continue - } - - passDir = parts[1] - break - } - - return passDir -} - -// listPassDir lists all the contents of a directory in the password store. -// Pass uses fancy unicode to emit stuff to stdout, so rather than try -// and parse this, let's just look at the directory structure instead. 
-func listPassDir(args ...string) ([]os.FileInfo, error) { - passDir := getPassDir() - p := path.Join(append([]string{passDir, PASS_FOLDER}, args...)...) - contents, err := ioutil.ReadDir(p) - if err != nil { - if os.IsNotExist(err) { - return []os.FileInfo{}, nil - } - - return nil, err - } - - return contents, nil -} - -// Get returns the username and secret to use for a given registry server URL. -func (h Pass) Get(serverURL string) (string, string, error) { - if !PassInitialized { - return "", "", errors.New("pass store is uninitialized") - } - - if serverURL == "" { - return "", "", errors.New("missing server url") - } - - encoded := base64.URLEncoding.EncodeToString([]byte(serverURL)) - - if _, err := os.Stat(path.Join(getPassDir(), PASS_FOLDER, encoded)); err != nil { - if os.IsNotExist(err) { - return "", "", nil; - } - - return "", "", err - } - - usernames, err := listPassDir(encoded) - if err != nil { - return "", "", err - } - - if len(usernames) < 1 { - return "", "", fmt.Errorf("no usernames for %s", serverURL) - } - - actual := strings.TrimSuffix(usernames[0].Name(), ".gpg") - secret, err := runPass("", "show", path.Join(PASS_FOLDER, encoded, actual)) - return actual, secret, err -} - -// List returns the stored URLs and corresponding usernames for a given credentials label -func (h Pass) List() (map[string]string, error) { - if !PassInitialized { - return nil, errors.New("pass store is uninitialized") - } - - servers, err := listPassDir() - if err != nil { - return nil, err - } - - resp := map[string]string{} - - for _, server := range servers { - if !server.IsDir() { - continue - } - - serverURL, err := base64.URLEncoding.DecodeString(server.Name()) - if err != nil { - return nil, err - } - - usernames, err := listPassDir(server.Name()) - if err != nil { - return nil, err - } - - if len(usernames) < 1 { - return nil, fmt.Errorf("no usernames for %s", serverURL) - } - - resp[string(serverURL)] = strings.TrimSuffix(usernames[0].Name(), ".gpg") - } - - return resp, nil -} diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index c5dafd722..46102d740 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -39,7 +39,7 @@ Ahmed Kamal Ahmet Alp Balkan Aidan Feldman Aidan Hobson Sayers -AJ Bowen +AJ Bowen Ajey Charantimath ajneu Akash Gupta @@ -54,6 +54,7 @@ Alan Scherger Alan Thompson Albert Callarisa Albert Zhang +Alejandro González Hevia Aleksa Sarai Aleksandrs Fadins Alena Prokharchyk @@ -65,6 +66,7 @@ Alex Coventry Alex Crawford Alex Ellis Alex Gaynor +Alex Goodman Alex Olshansky Alex Samorukov Alex Warhawk @@ -77,6 +79,7 @@ Alexander Shopov Alexandre Beslic Alexandre Garnier Alexandre González +Alexandre Jomin Alexandru Sfirlogea Alexey Guskov Alexey Kotlyarov @@ -98,11 +101,13 @@ Amir Goldstein Amit Bakshi Amit Krishnan Amit Shukla +Amr Gawish Amy Lindburg Anand Patil AnandkumarPatel Anatoly Borodin Anchal Agrawal +Anda Xu Anders Janmyr Andre Dublin <81dublin@gmail.com> Andre Granovsky @@ -197,6 +202,7 @@ Ben Toews Ben Wiklund Benjamin Atkin Benjamin Boudreau +Benjamin Yolken Benoit Chesneau Bernerd Schaefer Bernhard M. Wiedemann @@ -267,6 +273,7 @@ Carlos Sanchez Carol Fager-Higgins Cary Casey Bisson +Catalin Pirvu Ce Gao Cedric Davies Cezar Sa Espinola @@ -293,6 +300,8 @@ Chen Min Chen Mingjie Chen Qiu Cheng-mean Liu +Chengguang Xu +chenyuzhu Chetan Birajdar Chewey Chia-liang Kao @@ -313,6 +322,7 @@ Chris Snow Chris St. 
Pierre Chris Stivers Chris Swan +Chris Telfer Chris Wahl Chris Weyl Christian Berendt @@ -336,6 +346,7 @@ Chun Chen Ciro S. Costa Clayton Coleman Clinton Kitson +Cody Roseborough Coenraad Loubser Colin Dunklau Colin Hebert @@ -411,6 +422,7 @@ Dave MacDonald Dave Tucker David Anderson David Calavera +David Chung David Corking David Cramer David Currie @@ -510,6 +522,7 @@ Eike Herzbach Eivin Giske Skaaren Eivind Uggedal Elan Ruusamäe +Elango Sivanandam Elena Morozova Eli Uriegas Elias Faxö @@ -548,6 +561,7 @@ Erik St. Martin Erik Weathers Erno Hopearuoho Erwin van der Koogh +Ethan Bell Euan Kemp Eugen Krizo Eugene Yakubovich @@ -575,6 +589,7 @@ Fabrizio Regini Fabrizio Soppelsa Faiz Khan falmp +Fangming Fang Fangyuan Gao <21551127@zju.edu.cn> Fareed Dudhia Fathi Boudra @@ -673,6 +688,7 @@ Guilherme Salgado Guillaume Dufour Guillaume J. Charmes guoxiuyan +Guri Gurjeet Singh Guruprasad Gustav Sinder @@ -696,6 +712,7 @@ heartlock <21521209@zju.edu.cn> Hector Castro Helen Xie Henning Sprang +Hiroshi Hatake Hobofan Hollie Teal Hong Xu @@ -808,6 +825,7 @@ Jean-Pierre Huynh Jean-Tiare Le Bigot Jeeva S. Chelladhurai Jeff Anderson +Jeff Hajewski Jeff Johnston Jeff Lindsay Jeff Mickey @@ -893,6 +911,7 @@ Jonas Pfenniger Jonathan A. Sternberg Jonathan Boulle Jonathan Camp +Jonathan Choy Jonathan Dowland Jonathan Lebon Jonathan Lomas @@ -962,6 +981,7 @@ Kareem Khazem kargakis Karl Grzeszczak Karol Duleba +Karthik Karanth Karthik Nayak Kate Heddleston Katie McLaughlin @@ -1108,6 +1128,7 @@ Manfred Zabarauskas Manjunath A Kumatagi Mansi Nahar Manuel Meurer +Manuel Rüger Manuel Woelker mapk0y Marc Abramowitz @@ -1149,10 +1170,12 @@ Martin Mosegaard Amdisen Martin Redmond Mary Anthony Masahito Zembutsu +Masato Ohba Masayuki Morita Mason Malone Mateusz Sulima Mathias Monnerville +Mathieu Champlon Mathieu Le Marec - Pasquet Mathieu Parent Matt Apperson @@ -1209,6 +1232,7 @@ Michael Huettermann Michael Irwin Michael Käufl Michael Neale +Michael Nussbaum Michael Prokop Michael Scharf Michael Spetsiotis @@ -1223,6 +1247,7 @@ Michal Minář Michal Wieczorek Michaël Pailloncy Michał Czeraszkiewicz +Michał Gryko Michiel@unhosted Mickaël FORTUNATO Miguel Angel Fernández @@ -1239,6 +1264,7 @@ Mike Estes Mike Gaffney Mike Goelzer Mike Leone +Mike Lundy Mike MacCana Mike Naberezny Mike Snitzer @@ -1297,6 +1323,7 @@ Niall O'Higgins Nicholas E. Rabenau Nick DeCoursin Nick Irvine +Nick Neisen Nick Parker Nick Payne Nick Russo @@ -1322,6 +1349,7 @@ Nishant Totla NIWA Hideyuki Noah Meyerhans Noah Treuhaft +NobodyOnSE noducks Nolan Darilek nponeccop @@ -1329,7 +1357,6 @@ Nuutti Kotivuori nzwsch O.S. Tezer objectified -odk- Oguz Bilgic Oh Jinkyun Ohad Schneider @@ -1352,6 +1379,7 @@ Patrick Böänziger Patrick Devine Patrick Hemmer Patrick Stapleton +Patrik Cyvoct pattichen Paul paul @@ -1423,6 +1451,7 @@ Pradip Dhara Prasanna Gautam Pratik Karki Prayag Verma +Priya Wadhwa Przemek Hejman Pure White pysqz @@ -1838,11 +1867,13 @@ Wang Xing Wang Yuexiao Ward Vandewege WarheadsSE +Wassim Dhif Wayne Chang Wayne Song Weerasak Chongnguluam Wei Wu Wei-Ting Kuo +weipeng weiyan Weiyang Zhu Wen Cheng Ma @@ -1948,5 +1979,6 @@ Zunayed Ali Átila Camurça Alves 尹吉峰 徐俊杰 +慕陶 搏通 黄艳红00139573 diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index beb251a98..255a81aed 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,9 +3,9 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. 
const ( // DefaultVersion of Current REST API - DefaultVersion string = "1.37" + DefaultVersion = "1.38" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. - NoBaseImageSpecifier string = "scratch" + NoBaseImageSpecifier = "scratch" ) diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go index af1a54164..504b0c90d 100644 --- a/vendor/github.com/docker/docker/api/common_unix.go +++ b/vendor/github.com/docker/docker/api/common_unix.go @@ -3,4 +3,4 @@ package api // import "github.com/docker/docker/api" // MinVersion represents Minimum REST API version supported -const MinVersion string = "1.12" +const MinVersion = "1.12" diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 18b36d592..3df8d2336 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -7,7 +7,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" - units "github.com/docker/go-units" + "github.com/docker/go-units" ) // CheckpointCreateOptions holds parameters to create a checkpoint from a container @@ -181,8 +181,24 @@ type ImageBuildOptions struct { Target string SessionID string Platform string + // Version specifies the version of the unerlying builder to use + Version BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string } +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit = "2" +) + // ImageBuildResponse holds information // returned by a server after building // an image. diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index 02271ecd9..4ef26fa6c 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -401,6 +401,12 @@ type HostConfig struct { // Mounts specs used by the container Mounts []mount.Mount `json:",omitempty"` + // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) + MaskedPaths []string + + // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) + ReadonlyPaths []string + // Run a custom init inside the container, if null, use the daemon's configured settings Init *bool `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go index cab333e01..abae48b9a 100644 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -121,6 +121,9 @@ type PluginConfigArgs struct { // swagger:model PluginConfigInterface type PluginConfigInterface struct { + // Protocol to use for clients connecting to the plugin. 
+ ProtocolScheme string `json:"ProtocolScheme,omitempty"` + // socket // Required: true Socket string `json:"Socket"` diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go index ad52d46d5..d91234744 100644 --- a/vendor/github.com/docker/docker/api/types/port.go +++ b/vendor/github.com/docker/docker/api/types/port.go @@ -7,7 +7,7 @@ package types // swagger:model Port type Port struct { - // IP + // Host IP address that the container's port is mapped to IP string `json:"IP,omitempty"` // Port on the container diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 0041653c9..151211ff5 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -55,6 +55,7 @@ type ContainerSpec struct { User string `json:",omitempty"` Groups []string `json:",omitempty"` Privileges *Privileges `json:",omitempty"` + Init *bool `json:",omitempty"` StopSignal string `json:",omitempty"` TTY bool `json:",omitempty"` OpenStdin bool `json:",omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go index 81b5f4cfd..0c77403cc 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -11,9 +11,17 @@ const ( RuntimeContainer RuntimeType = "container" // RuntimePlugin is the plugin based runtime RuntimePlugin RuntimeType = "plugin" + // RuntimeNetworkAttachment is the network attachment runtime + RuntimeNetworkAttachment RuntimeType = "attachment" // RuntimeURLContainer is the proto url for the container type RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" // RuntimeURLPlugin is the proto url for the plugin type RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" ) + +// NetworkAttachmentSpec represents the runtime spec type for network +// attachment tasks +type NetworkAttachmentSpec struct { + ContainerID string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index 5c2bc492e..b35605d12 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -60,10 +60,13 @@ type Task struct { // TaskSpec represents the spec of a task. type TaskSpec struct { - // ContainerSpec and PluginSpec are mutually exclusive. - // PluginSpec will only be used when the `Runtime` field is set to `plugin` - ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` + // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. + // PluginSpec is only used when the `Runtime` field is set to `plugin` + // NetworkAttachmentSpec is used if the `Runtime` field is set to + // `attachment`. 
+ ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` Resources *ResourceRequirements `json:",omitempty"` RestartPolicy *RestartPolicy `json:",omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go index 8d573accb..ea3495efe 100644 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -82,11 +82,14 @@ func GetTimestamp(value string, reference time.Time) (string, error) { } if err != nil { - // if there is a `-` then it's an RFC3339 like timestamp otherwise assume unixtimestamp + // if there is a `-` then it's an RFC3339 like timestamp if strings.Contains(value, "-") { return "", err // was probably an RFC3339 like timestamp but the parser failed with an error } - return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + if _, _, err := parseTimestamp(value); err != nil { + return "", fmt.Errorf("failed to parse value as time or duration: %q", value) + } + return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) } return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil @@ -104,6 +107,10 @@ func ParseTimestamps(value string, def int64) (int64, int64, error) { if value == "" { return def, 0, nil } + return parseTimestamp(value) +} + +func parseTimestamp(value string) (int64, int64, error) { sa := strings.SplitN(value, ".", 2) s, err := strconv.ParseInt(sa[0], 10, 64) if err != nil { diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index 729f4eb6c..06c0ca3a6 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -512,7 +512,8 @@ type DiskUsage struct { Images []*ImageSummary Containers []*Container Volumes []*Volume - BuilderSize int64 + BuildCache []*BuildCache + BuilderSize int64 // deprecated } // ContainersPruneReport contains the response for Engine API: @@ -585,3 +586,17 @@ type PushResult struct { type BuildResult struct { ID string } + +// BuildCache contains information about a build cache record +type BuildCache struct { + ID string + Mutable bool + InUse bool + Size int64 + + CreatedAt time.Time + LastUsedAt *time.Time + UsageCount int + Parent string + Description string +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go similarity index 81% rename from vendor/github.com/docker/docker/api/types/volume/volumes_create.go rename to vendor/github.com/docker/docker/api/types/volume/volume_create.go index b2dd3a419..539e9b97d 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_create.go @@ -1,4 +1,4 @@ -package volume // import "github.com/docker/docker/api/types/volume" +package volume // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -7,9 +7,9 @@ package volume // import "github.com/docker/docker/api/types/volume" // See hack/generate-swagger-api.sh // 
---------------------------------------------------------------------------- -// VolumesCreateBody volumes create body -// swagger:model VolumesCreateBody -type VolumesCreateBody struct { +// VolumeCreateBody +// swagger:model VolumeCreateBody +type VolumeCreateBody struct { // Name of the volume driver to use. // Required: true diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go similarity index 74% rename from vendor/github.com/docker/docker/api/types/volume/volumes_list.go rename to vendor/github.com/docker/docker/api/types/volume/volume_list.go index e071ca08f..1bb279dbb 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_list.go @@ -1,4 +1,4 @@ -package volume // import "github.com/docker/docker/api/types/volume" +package volume // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -9,9 +9,9 @@ package volume // import "github.com/docker/docker/api/types/volume" import "github.com/docker/docker/api/types" -// VolumesListOKBody volumes list o k body -// swagger:model VolumesListOKBody -type VolumesListOKBody struct { +// VolumeListOKBody +// swagger:model VolumeListOKBody +type VolumeListOKBody struct { // List of volumes // Required: true diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go new file mode 100644 index 000000000..4cf8c980a --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// BuildCancel requests the daemon to cancel ongoing build request +func (cli *Client) BuildCancel(ctx context.Context, id string) error { + query := url.Values{} + query.Set("id", id) + + serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + if err != nil { + return err + } + defer ensureReaderClosed(serverResp) + + return nil +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index b8428128b..b874b3b52 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -356,6 +356,11 @@ func (cli *Client) DaemonHost() string { return cli.host } +// HTTPClient returns a copy of the HTTP client bound to the server +func (cli *Client) HTTPClient() *http.Client { + return &*cli.client +} + // ParseHostURL parses a url string, validates the string is a host url, and // returns the parsed URL func ParseHostURL(host string) (*url.URL, error) { diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go index 7c7fc9ea5..5b6541f03 100644 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -8,6 +8,7 @@ import ( "github.com/docker/docker/api/types" timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" ) // ContainerLogs returns the logs generated by a container in an io.ReadCloser. 
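
As a rough orientation for this vendor bump, the sketch below shows one way the newly vendored BuildCancel client method and the BuildID/Version fields added to ImageBuildOptions could be wired together; it is not part of the patch, and the build ID value, the empty build-context reader, and the use of client.NewEnvClient are illustrative placeholders only.

package main

import (
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// client.NewEnvClient is used only for brevity; any constructed *client.Client works.
	cli, err := client.NewEnvClient()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	ctx := context.Background()

	// Caller-chosen identifier (placeholder value); passing the same value to
	// BuildCancel lets the daemon abort this specific build.
	buildID := "example-build-1"

	// Placeholder build context; a real caller would pass a tar stream of the build directory.
	buildContext := strings.NewReader("")

	resp, err := cli.ImageBuild(ctx, buildContext, types.ImageBuildOptions{
		BuildID: buildID,
		Version: types.BuilderV1,
	})
	if err == nil {
		defer resp.Body.Close()
	}

	// New in this vendoring: ask the daemon to cancel the in-flight build identified above.
	if err := cli.BuildCancel(ctx, buildID); err != nil {
		fmt.Fprintln(os.Stderr, "cancel failed:", err)
	}
}
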
@@ -45,7 +46,7 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options if options.Since != "" { ts, err := timetypes.GetTimestamp(options.Since, time.Now()) if err != nil { - return nil, err + return nil, errors.Wrap(err, `invalid value for "since"`) } query.Set("since", ts) } @@ -53,7 +54,7 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options if options.Until != "" { ts, err := timetypes.GetTimestamp(options.Until, time.Now()) if err != nil { - return nil, err + return nil, errors.Wrap(err, `invalid value for "until"`) } query.Set("until", ts) } diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go index 5d7f1606e..629d7ab64 100644 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -8,8 +8,13 @@ import ( timetypes "github.com/docker/docker/api/types/time" ) -// ContainerStop stops a container without terminating the process. -// The process is blocked until the container stops or the timeout expires. +// ContainerStop stops a container. In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { query := url.Values{} if timeout != nil { diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index 05c124627..0461af329 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -2,7 +2,6 @@ package client // import "github.com/docker/docker/client" import ( "fmt" - "net/http" "github.com/docker/docker/api/types/versions" diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 2b14831fd..35f5dd86d 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -9,7 +9,6 @@ import ( "net/http" "net/http/httputil" "net/url" - "strings" "time" "github.com/docker/docker/api/types" @@ -17,21 +16,6 @@ import ( "github.com/pkg/errors" ) -// tlsClientCon holds tls information and a dialed connection. -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if conn, ok := c.rawConn.(types.CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - // postHijacked sends a POST request and hijacks the connection. 
func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { bodyEncoded, err := encodeData(body) @@ -54,96 +38,9 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err } -func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { - return tlsDialWithDialer(new(net.Dialer), network, addr, config) -} - -// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in -// order to return our custom tlsClientCon struct which holds both the tls.Conn -// object _and_ its underlying raw connection. The rationale for this is that -// we need to be able to close the write end of the connection when attaching, -// which tls.Conn does not provide. -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. - timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := time.Until(dialer.Deadline) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - proxyDialer, err := sockets.DialerFromEnvironment(dialer) - if err != nil { - return nil, err - } - - rawConn, err := proxyDialer.Dial(network, addr) - if err != nil { - return nil, err - } - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := rawConn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - config = tlsConfigClone(config) - config.ServerName = hostname - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - // This is Docker difference with standard's crypto/tls package: returned a - // wrapper which holds both the TLS and raw connections. 
- return &tlsClientCon{conn, rawConn}, nil -} - func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { if tlsConfig != nil && proto != "unix" && proto != "npipe" { - // Notice this isn't Go standard's tls.Dial function - return tlsDial(proto, addr, tlsConfig) + return tls.Dial(proto, addr, tlsConfig) } if proto == "npipe" { return sockets.DialPipe(addr, 32*time.Second) diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index 672146031..dff19b989 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -133,5 +133,9 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur if options.Platform != "" { query.Set("platform", strings.ToLower(options.Platform)) } + if options.BuildID != "" { + query.Set("buildid", options.BuildID) + } + query.Set("version", string(options.Version)) return query, nil } diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 314c940c3..9250c468a 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -4,6 +4,7 @@ import ( "context" "io" "net" + "net/http" "time" "github.com/docker/docker/api/types" @@ -33,6 +34,7 @@ type CommonAPIClient interface { VolumeAPIClient ClientVersion() string DaemonHost() string + HTTPClient() *http.Client ServerVersion(ctx context.Context) (types.Version, error) NegotiateAPIVersion(ctx context.Context) NegotiateAPIVersionPing(types.Ping) @@ -84,6 +86,7 @@ type DistributionAPIClient interface { type ImageAPIClient interface { ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) + BuildCancel(ctx context.Context, id string) error ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) @@ -168,10 +171,10 @@ type SystemAPIClient interface { // VolumeAPIClient defines API client methods for the volumes type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) + VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) VolumeRemove(ctx context.Context, volumeID string, force bool) error VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) } diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go index c07fc0eb3..e7a750571 100644 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -1,9 +1,8 @@ package client // import "github.com/docker/docker/client" import ( - "net/url" - "context" + 
"net/url" "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index 871460c7b..8fadda4a9 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -9,7 +9,7 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -136,7 +136,7 @@ func imageWithDigestString(image string, dgst digest.Digest) string { // imageWithTagString takes an image string, and returns a tagged image // string, adding a 'latest' tag if one was not provided. It returns an -// emptry string if a canonical reference was provided +// empty string if a canonical reference was provided func imageWithTagString(image string) string { namedRef, err := reference.ParseNormalizedNamed(image) if err == nil { diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go index af96b1e62..906fd4059 100644 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -8,6 +8,7 @@ import ( "github.com/docker/docker/api/types" timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" ) // ServiceLogs returns the logs generated by a service in an io.ReadCloser. @@ -25,7 +26,7 @@ func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options ty if options.Since != "" { ts, err := timetypes.GetTimestamp(options.Since, time.Now()) if err != nil { - return nil, err + return nil, errors.Wrap(err, `invalid value for "since"`) } query.Set("since", ts) } diff --git a/vendor/github.com/docker/docker/client/tlsconfig_clone.go b/vendor/github.com/docker/docker/client/tlsconfig_clone.go deleted file mode 100644 index 88200e92c..000000000 --- a/vendor/github.com/docker/docker/client/tlsconfig_clone.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.8 - -package client // import "github.com/docker/docker/client" - -import "crypto/tls" - -// tlsConfigClone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. -func tlsConfigClone(c *tls.Config) *tls.Config { - return c.Clone() -} diff --git a/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go b/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go deleted file mode 100644 index e29854236..000000000 --- a/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.7,!go1.8 - -package client // import "github.com/docker/docker/client" - -import "crypto/tls" - -// tlsConfigClone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. 
-func tlsConfigClone(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go index 639c32528..f1f6fcdc4 100644 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -9,7 +9,7 @@ import ( ) // VolumeCreate creates a volume in the docker host. -func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { +func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { var volume types.Volume resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) if err != nil { diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go index 4cf74831f..284554d67 100644 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -10,8 +10,8 @@ import ( ) // VolumeList returns the volumes configured in the docker host. -func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { - var volumes volumetypes.VolumesListOKBody +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { + var volumes volumetypes.VolumeListOKBody query := url.Values{} if filter.Len() > 0 { diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE deleted file mode 100644 index d511905c1..000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. 
- - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. 
- -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. 
(This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. 
- -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. 
diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE deleted file mode 100644 index 5b6e7c66c..000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE +++ /dev/null @@ -1,340 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Library General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. 
The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. 
- -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. 
Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. 
If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. 
- -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Library General -Public License instead of this License. diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go index cc26e4b75..e0513331b 100644 --- a/vendor/github.com/docker/docker/errdefs/is.go +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -47,7 +47,7 @@ func IsConflict(err error) bool { return ok } -// IsUnauthorized returns if the the passed in error is an ErrUnauthorized +// IsUnauthorized returns if the passed in error is an ErrUnauthorized func IsUnauthorized(err error) bool { _, ok := getImplementer(err).(ErrUnauthorized) return ok diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index 5fb3995e9..daddebded 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -127,6 +127,7 @@ func IsArchivePath(path string) bool { if err != nil { return false } + defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go index e75bfc9b6..69aadd823 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -73,5 +73,5 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { // no notion of file ownership mapping yet on Windows - return idtools.IDPair{0, 0}, nil + return idtools.IDPair{UID: 0, GID: 0}, nil } diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index d0cff98ff..fae4b9de0 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -247,10 +247,12 @@ func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decomp defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform if decompress { - layer, err = DecompressStream(layer) + decompLayer, err := DecompressStream(layer) if err != nil { return 0, err } + defer decompLayer.Close() + layer = decompLayer } return UnpackLayer(dest, layer, options) } diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go index 58aefe3ef..797143ee8 100644 --- a/vendor/github.com/docker/docker/pkg/archive/time_linux.go +++ 
b/vendor/github.com/docker/docker/pkg/archive/time_linux.go @@ -9,7 +9,7 @@ func timeToTimespec(time time.Time) (ts syscall.Timespec) { if time.IsZero() { // Return UTIME_OMIT special value ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) + ts.Nsec = (1 << 30) - 2 return } return syscall.NsecToTimespec(time.UnixNano()) diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go index 88e0f2834..28cad499a 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -13,7 +13,7 @@ import ( "github.com/sirupsen/logrus" ) -// PatternMatcher allows checking paths agaist a list of patterns +// PatternMatcher allows checking paths against a list of patterns type PatternMatcher struct { patterns []*Pattern exclusions bool diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go index e2b493158..d1f173a31 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -30,8 +30,8 @@ func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" + subuidFileName = "/etc/subuid" + subgidFileName = "/etc/subgid" ) // MkdirAllAndChown creates a directory (include any along the path) and then modifies diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go index b9f40d3ef..dd95f3670 100644 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -8,9 +8,9 @@ import ( "strings" "time" - gotty "github.com/Nvveen/Gotty" + "github.com/Nvveen/Gotty" "github.com/docker/docker/pkg/term" - units "github.com/docker/go-units" + "github.com/docker/go-units" ) // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to @@ -245,7 +245,7 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { // DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` // describes if `out` is a terminal. If this is the case, it will print `\n` at the end of // each line and move the cursor while displaying. 
-func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { +func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { var ( dec = json.NewDecoder(in) ids = make(map[string]int) @@ -277,7 +277,7 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, if jm.Aux != nil { if auxCallback != nil { - auxCallback(jm.Aux) + auxCallback(jm) } continue } @@ -330,6 +330,6 @@ type stream interface { } // DisplayJSONMessagesToStream prints json messages to the output stream -func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error { +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) } diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go index 3b1e18736..8f6e0a737 100644 --- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -21,7 +21,7 @@ const ( // Stderr represents standard error steam type. Stderr // Systemerr represents errors originating from the system that make it - // into the the multiplexed stream. + // into the multiplexed stream. Systemerr stdWriterPrefixLen = 8 diff --git a/vendor/github.com/docker/docker/pkg/term/proxy.go b/vendor/github.com/docker/docker/pkg/term/proxy.go index 5c4a5260f..da733e584 100644 --- a/vendor/github.com/docker/docker/pkg/term/proxy.go +++ b/vendor/github.com/docker/docker/pkg/term/proxy.go @@ -34,6 +34,10 @@ func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { func (r *escapeProxy) Read(buf []byte) (int, error) { nr, err := r.r.Read(buf) + if len(r.escapeKeys) == 0 { + return nr, err + } + preserve := func() { // this preserves the original key presses in the passed in buffer nr += r.escapeKeyPos diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go index 1f8965969..3e5593ca6 100644 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go @@ -9,7 +9,7 @@ import ( "os" "sync" - ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/docker/docker/registry/endpoint_v1.go b/vendor/github.com/docker/docker/registry/endpoint_v1.go index 94c56dca7..832fdb95a 100644 --- a/vendor/github.com/docker/docker/registry/endpoint_v1.go +++ b/vendor/github.com/docker/docker/registry/endpoint_v1.go @@ -69,7 +69,7 @@ func validateEndpoint(endpoint *V1Endpoint) error { func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) *V1Endpoint { endpoint := &V1Endpoint{ - IsSecure: (tlsConfig == nil || !tlsConfig.InsecureSkipVerify), + IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, URL: new(url.URL), } diff --git a/vendor/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go index 1d0bae2ca..ef1429959 100644 --- a/vendor/github.com/docker/docker/registry/session.go +++ b/vendor/github.com/docker/docker/registry/session.go @@ -3,7 +3,6 @@ package registry // import "github.com/docker/docker/registry" import ( "bytes" "crypto/sha256" - 
"sync" // this is required for some certificates _ "crypto/sha512" "encoding/hex" @@ -16,6 +15,7 @@ import ( "net/url" "strconv" "strings" + "sync" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" diff --git a/vendor/github.com/docker/go-metrics/LICENSE.code b/vendor/github.com/docker/go-metrics/LICENSE.code new file mode 100644 index 000000000..8f3fee627 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-metrics/LICENSE.docs b/vendor/github.com/docker/go-metrics/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. 
More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. 
Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. 
if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. 
Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/docker/go-metrics/NOTICE b/vendor/github.com/docker/go-metrics/NOTICE new file mode 100644 index 000000000..8915f0277 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/go-metrics/counter.go b/vendor/github.com/docker/go-metrics/counter.go new file mode 100644 index 000000000..fe36316a4 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/counter.go @@ -0,0 +1,52 @@ +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Counter is a metrics that can only increment its current count +type Counter interface { + // Inc adds Sum(vs) to the counter. Sum(vs) must be positive. 
+ // + // If len(vs) == 0, increments the counter by 1. + Inc(vs ...float64) +} + +// LabeledCounter is counter that must have labels populated before use. +type LabeledCounter interface { + WithValues(vs ...string) Counter +} + +type labeledCounter struct { + pc *prometheus.CounterVec +} + +func (lc *labeledCounter) WithValues(vs ...string) Counter { + return &counter{pc: lc.pc.WithLabelValues(vs...)} +} + +func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) { + lc.pc.Describe(ch) +} + +func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) { + lc.pc.Collect(ch) +} + +type counter struct { + pc prometheus.Counter +} + +func (c *counter) Inc(vs ...float64) { + if len(vs) == 0 { + c.pc.Inc() + } + + c.pc.Add(sumFloat64(vs...)) +} + +func (c *counter) Describe(ch chan<- *prometheus.Desc) { + c.pc.Describe(ch) +} + +func (c *counter) Collect(ch chan<- prometheus.Metric) { + c.pc.Collect(ch) +} diff --git a/vendor/github.com/docker/go-metrics/docs.go b/vendor/github.com/docker/go-metrics/docs.go new file mode 100644 index 000000000..8fbdfc697 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/docs.go @@ -0,0 +1,3 @@ +// This package is small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects. + +package metrics diff --git a/vendor/github.com/docker/go-metrics/gauge.go b/vendor/github.com/docker/go-metrics/gauge.go new file mode 100644 index 000000000..74296e877 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/gauge.go @@ -0,0 +1,72 @@ +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Gauge is a metric that allows incrementing and decrementing a value +type Gauge interface { + Inc(...float64) + Dec(...float64) + + // Add adds the provided value to the gauge's current value + Add(float64) + + // Set replaces the gauge's current value with the provided value + Set(float64) +} + +// LabeledGauge describes a gauge the must have values populated before use. 
+type LabeledGauge interface { + WithValues(labels ...string) Gauge +} + +type labeledGauge struct { + pg *prometheus.GaugeVec +} + +func (lg *labeledGauge) WithValues(labels ...string) Gauge { + return &gauge{pg: lg.pg.WithLabelValues(labels...)} +} + +func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) { + lg.pg.Describe(c) +} + +func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) { + lg.pg.Collect(c) +} + +type gauge struct { + pg prometheus.Gauge +} + +func (g *gauge) Inc(vs ...float64) { + if len(vs) == 0 { + g.pg.Inc() + } + + g.Add(sumFloat64(vs...)) +} + +func (g *gauge) Dec(vs ...float64) { + if len(vs) == 0 { + g.pg.Dec() + } + + g.Add(-sumFloat64(vs...)) +} + +func (g *gauge) Add(v float64) { + g.pg.Add(v) +} + +func (g *gauge) Set(v float64) { + g.pg.Set(v) +} + +func (g *gauge) Describe(c chan<- *prometheus.Desc) { + g.pg.Describe(c) +} + +func (g *gauge) Collect(c chan<- prometheus.Metric) { + g.pg.Collect(c) +} diff --git a/vendor/github.com/docker/go-metrics/handler.go b/vendor/github.com/docker/go-metrics/handler.go new file mode 100644 index 000000000..bb3be41d3 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/handler.go @@ -0,0 +1,13 @@ +package metrics + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" +) + +// Handler returns the global http.Handler that provides the prometheus +// metrics format on GET requests +func Handler() http.Handler { + return prometheus.Handler() +} diff --git a/vendor/github.com/docker/go-metrics/helpers.go b/vendor/github.com/docker/go-metrics/helpers.go new file mode 100644 index 000000000..68b7f51b3 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/helpers.go @@ -0,0 +1,10 @@ +package metrics + +func sumFloat64(vs ...float64) float64 { + var sum float64 + for _, v := range vs { + sum += v + } + + return sum +} diff --git a/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/docker/go-metrics/namespace.go new file mode 100644 index 000000000..7734c2945 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/namespace.go @@ -0,0 +1,181 @@ +package metrics + +import ( + "fmt" + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +type Labels map[string]string + +// NewNamespace returns a namespaces that is responsible for managing a collection of +// metrics for a particual namespace and subsystem +// +// labels allows const labels to be added to all metrics created in this namespace +// and are commonly used for data like application version and git commit +func NewNamespace(name, subsystem string, labels Labels) *Namespace { + if labels == nil { + labels = make(map[string]string) + } + return &Namespace{ + name: name, + subsystem: subsystem, + labels: labels, + } +} + +// Namespace describes a set of metrics that share a namespace and subsystem. +type Namespace struct { + name string + subsystem string + labels Labels + mu sync.Mutex + metrics []prometheus.Collector +} + +// WithConstLabels returns a namespace with the provided set of labels merged +// with the existing constant labels on the namespace. +// +// Only metrics created with the returned namespace will get the new constant +// labels. The returned namespace must be registered separately. 
+func (n *Namespace) WithConstLabels(labels Labels) *Namespace { + n.mu.Lock() + ns := &Namespace{ + name: n.name, + subsystem: n.subsystem, + labels: mergeLabels(n.labels, labels), + } + n.mu.Unlock() + return ns +} + +func (n *Namespace) NewCounter(name, help string) Counter { + c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))} + n.Add(c) + return c +} + +func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter { + c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)} + n.Add(c) + return c +} + +func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts { + return prometheus.CounterOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Total), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewTimer(name, help string) Timer { + t := &timer{ + m: prometheus.NewHistogram(n.newTimerOpts(name, help)), + } + n.Add(t) + return t +} + +func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer { + t := &labeledTimer{ + m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels), + } + n.Add(t) + return t +} + +func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Seconds), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge { + g := &gauge{ + pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)), + } + n.Add(g) + return g +} + +func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge { + g := &labeledGauge{ + pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels), + } + n.Add(g) + return g +} + +func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, unit), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) Describe(ch chan<- *prometheus.Desc) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Describe(ch) + } +} + +func (n *Namespace) Collect(ch chan<- prometheus.Metric) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Collect(ch) + } +} + +func (n *Namespace) Add(collector prometheus.Collector) { + n.mu.Lock() + n.metrics = append(n.metrics, collector) + n.mu.Unlock() +} + +func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc { + name = makeName(name, unit) + namespace := n.name + if n.subsystem != "" { + namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem) + } + name = fmt.Sprintf("%s_%s", namespace, name) + return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels)) +} + +// mergeLabels merges two or more labels objects into a single map, favoring +// the later labels. 
+func mergeLabels(lbs ...Labels) Labels { + merged := make(Labels) + + for _, target := range lbs { + for k, v := range target { + merged[k] = v + } + } + + return merged +} + +func makeName(name string, unit Unit) string { + if unit == "" { + return name + } + + return fmt.Sprintf("%s_%s", name, unit) +} diff --git a/vendor/github.com/docker/go-metrics/register.go b/vendor/github.com/docker/go-metrics/register.go new file mode 100644 index 000000000..708358df0 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/register.go @@ -0,0 +1,15 @@ +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Register adds all the metrics in the provided namespace to the global +// metrics registry +func Register(n *Namespace) { + prometheus.MustRegister(n) +} + +// Deregister removes all the metrics in the provided namespace from the +// global metrics registry +func Deregister(n *Namespace) { + prometheus.Unregister(n) +} diff --git a/vendor/github.com/docker/go-metrics/timer.go b/vendor/github.com/docker/go-metrics/timer.go new file mode 100644 index 000000000..e91eca76b --- /dev/null +++ b/vendor/github.com/docker/go-metrics/timer.go @@ -0,0 +1,68 @@ +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// StartTimer begins a timer observation at the callsite. When the target +// operation is completed, the caller should call the return done func(). +func StartTimer(timer Timer) (done func()) { + start := time.Now() + return func() { + timer.Update(time.Since(start)) + } +} + +// Timer is a metric that allows collecting the duration of an action in seconds +type Timer interface { + // Update records an observation, duration, and converts to the target + // units. + Update(duration time.Duration) + + // UpdateSince will add the duration from the provided starting time to the + // timer's summary with the precisions that was used in creation of the timer + UpdateSince(time.Time) +} + +// LabeledTimer is a timer that must have label values populated before use. 
+type LabeledTimer interface { + WithValues(labels ...string) Timer +} + +type labeledTimer struct { + m *prometheus.HistogramVec +} + +func (lt *labeledTimer) WithValues(labels ...string) Timer { + return &timer{m: lt.m.WithLabelValues(labels...)} +} + +func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) { + lt.m.Describe(c) +} + +func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) { + lt.m.Collect(c) +} + +type timer struct { + m prometheus.Histogram +} + +func (t *timer) Update(duration time.Duration) { + t.m.Observe(duration.Seconds()) +} + +func (t *timer) UpdateSince(since time.Time) { + t.m.Observe(time.Since(since).Seconds()) +} + +func (t *timer) Describe(c chan<- *prometheus.Desc) { + t.m.Describe(c) +} + +func (t *timer) Collect(c chan<- prometheus.Metric) { + t.m.Collect(c) +} diff --git a/vendor/github.com/docker/go-metrics/unit.go b/vendor/github.com/docker/go-metrics/unit.go new file mode 100644 index 000000000..c96622f90 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/unit.go @@ -0,0 +1,12 @@ +package metrics + +// Unit represents the type or precision of a metric that is appended to +// the metrics fully qualified name +type Unit string + +const ( + Nanoseconds Unit = "nanoseconds" + Seconds Unit = "seconds" + Bytes Unit = "bytes" + Total Unit = "total" +) diff --git a/vendor/github.com/docker/swarmkit/LICENSE b/vendor/github.com/docker/swarmkit/LICENSE index 8dada3eda..e2db6ed11 100644 --- a/vendor/github.com/docker/swarmkit/LICENSE +++ b/vendor/github.com/docker/swarmkit/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright 2016-2018 Docker Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/docker/swarmkit/api/ca.pb.go b/vendor/github.com/docker/swarmkit/api/ca.pb.go index 7d289126a..23d375f95 100644 --- a/vendor/github.com/docker/swarmkit/api/ca.pb.go +++ b/vendor/github.com/docker/swarmkit/api/ca.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/docker/swarmkit/api/ca.proto -// DO NOT EDIT! /* Package api is a generated protocol buffer package. 
@@ -228,18 +227,16 @@ import math "math" import _ "github.com/gogo/protobuf/gogoproto" import _ "github.com/docker/swarmkit/protobuf/plugin" -import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" +import deepcopy "github.com/docker/swarmkit/api/deepcopy" -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" import raftselector "github.com/docker/swarmkit/manager/raftselector" import codes "google.golang.org/grpc/codes" import status "google.golang.org/grpc/status" import metadata "google.golang.org/grpc/metadata" -import transport "google.golang.org/grpc/transport" +import peer "google.golang.org/grpc/peer" import rafttime "time" import strings "strings" @@ -419,11 +416,11 @@ func (m *NodeCertificateStatusResponse) CopyFrom(src interface{}) { *m = *o if o.Status != nil { m.Status = &IssuanceStatus{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Status, o.Status) + deepcopy.Copy(m.Status, o.Status) } if o.Certificate != nil { m.Certificate = &Certificate{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Certificate, o.Certificate) + deepcopy.Copy(m.Certificate, o.Certificate) } } @@ -517,7 +514,7 @@ func (m *GetUnlockKeyResponse) CopyFrom(src interface{}) { m.UnlockKey = make([]byte, len(o.UnlockKey)) copy(m.UnlockKey, o.UnlockKey) } - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Version, &o.Version) + deepcopy.Copy(&m.Version, &o.Version) } // Reference imports to suppress errors if they are not otherwise used. @@ -949,24 +946,6 @@ func (m *GetUnlockKeyResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Ca(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Ca(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintCa(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -985,12 +964,12 @@ type raftProxyCAServer struct { func NewRaftProxyCAServer(local CAServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) CAServer { redirectChecker := func(ctx context.Context) (context.Context, error) { - s, ok := transport.StreamFromContext(ctx) + p, ok := peer.FromContext(ctx) if !ok { return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") } - addr := s.ServerTransport().RemoteAddr().String() - md, ok := metadata.FromContext(ctx) + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } @@ -998,7 +977,7 @@ func NewRaftProxyCAServer(local CAServer, connSelector raftselector.ConnProvider md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) - return metadata.NewContext(ctx, md), nil + return metadata.NewOutgoingContext(ctx, md), nil } remoteMods := []func(context.Context) (context.Context, error){redirectChecker} remoteMods = append(remoteMods, 
remoteCtxMod) @@ -1127,12 +1106,12 @@ type raftProxyNodeCAServer struct { func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) NodeCAServer { redirectChecker := func(ctx context.Context) (context.Context, error) { - s, ok := transport.StreamFromContext(ctx) + p, ok := peer.FromContext(ctx) if !ok { return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") } - addr := s.ServerTransport().RemoteAddr().String() - md, ok := metadata.FromContext(ctx) + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } @@ -1140,7 +1119,7 @@ func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector raftselector.Conn md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) - return metadata.NewContext(ctx, md), nil + return metadata.NewOutgoingContext(ctx, md), nil } remoteMods := []func(context.Context) (context.Context, error){redirectChecker} remoteMods = append(remoteMods, remoteCtxMod) diff --git a/vendor/github.com/docker/swarmkit/api/control.pb.go b/vendor/github.com/docker/swarmkit/api/control.pb.go index fb28c5d33..2f158042f 100644 --- a/vendor/github.com/docker/swarmkit/api/control.pb.go +++ b/vendor/github.com/docker/swarmkit/api/control.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/docker/swarmkit/api/control.proto -// DO NOT EDIT! package api @@ -10,23 +9,21 @@ import math "math" import _ "github.com/gogo/protobuf/gogoproto" import _ "github.com/docker/swarmkit/protobuf/plugin" -import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" +import deepcopy "github.com/docker/swarmkit/api/deepcopy" -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" import raftselector "github.com/docker/swarmkit/manager/raftselector" import codes "google.golang.org/grpc/codes" import status "google.golang.org/grpc/status" import metadata "google.golang.org/grpc/metadata" -import transport "google.golang.org/grpc/transport" +import peer "google.golang.org/grpc/peer" import rafttime "time" import strings "strings" import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" +import sortkeys "github.com/gogo/protobuf/sortkeys" import io "io" @@ -1055,7 +1052,7 @@ func (m *GetNodeResponse) CopyFrom(src interface{}) { *m = *o if o.Node != nil { m.Node = &Node{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node) + deepcopy.Copy(m.Node, o.Node) } } @@ -1074,7 +1071,7 @@ func (m *ListNodesRequest) CopyFrom(src interface{}) { *m = *o if o.Filters != nil { m.Filters = &ListNodesRequest_Filters{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + deepcopy.Copy(m.Filters, o.Filters) } } @@ -1142,7 +1139,7 @@ func (m *ListNodesResponse) CopyFrom(src interface{}) { m.Nodes = make([]*Node, len(o.Nodes)) for i := range m.Nodes { m.Nodes[i] = &Node{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Nodes[i], o.Nodes[i]) + deepcopy.Copy(m.Nodes[i], o.Nodes[i]) } } @@ -1163,11 +1160,11 @@ func (m *UpdateNodeRequest) CopyFrom(src interface{}) { *m = *o if o.NodeVersion != nil { m.NodeVersion = 
&Version{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.NodeVersion, o.NodeVersion) + deepcopy.Copy(m.NodeVersion, o.NodeVersion) } if o.Spec != nil { m.Spec = &NodeSpec{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + deepcopy.Copy(m.Spec, o.Spec) } } @@ -1186,7 +1183,7 @@ func (m *UpdateNodeResponse) CopyFrom(src interface{}) { *m = *o if o.Node != nil { m.Node = &Node{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node) + deepcopy.Copy(m.Node, o.Node) } } @@ -1245,7 +1242,7 @@ func (m *GetTaskResponse) CopyFrom(src interface{}) { *m = *o if o.Task != nil { m.Task = &Task{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Task, o.Task) + deepcopy.Copy(m.Task, o.Task) } } @@ -1289,7 +1286,7 @@ func (m *ListTasksRequest) CopyFrom(src interface{}) { *m = *o if o.Filters != nil { m.Filters = &ListTasksRequest_Filters{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + deepcopy.Copy(m.Filters, o.Filters) } } @@ -1367,7 +1364,7 @@ func (m *ListTasksResponse) CopyFrom(src interface{}) { m.Tasks = make([]*Task, len(o.Tasks)) for i := range m.Tasks { m.Tasks[i] = &Task{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + deepcopy.Copy(m.Tasks[i], o.Tasks[i]) } } @@ -1388,7 +1385,7 @@ func (m *CreateServiceRequest) CopyFrom(src interface{}) { *m = *o if o.Spec != nil { m.Spec = &ServiceSpec{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + deepcopy.Copy(m.Spec, o.Spec) } } @@ -1407,7 +1404,7 @@ func (m *CreateServiceResponse) CopyFrom(src interface{}) { *m = *o if o.Service != nil { m.Service = &Service{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Service, o.Service) + deepcopy.Copy(m.Service, o.Service) } } @@ -1441,7 +1438,7 @@ func (m *GetServiceResponse) CopyFrom(src interface{}) { *m = *o if o.Service != nil { m.Service = &Service{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Service, o.Service) + deepcopy.Copy(m.Service, o.Service) } } @@ -1460,11 +1457,11 @@ func (m *UpdateServiceRequest) CopyFrom(src interface{}) { *m = *o if o.ServiceVersion != nil { m.ServiceVersion = &Version{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.ServiceVersion, o.ServiceVersion) + deepcopy.Copy(m.ServiceVersion, o.ServiceVersion) } if o.Spec != nil { m.Spec = &ServiceSpec{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + deepcopy.Copy(m.Spec, o.Spec) } } @@ -1483,7 +1480,7 @@ func (m *UpdateServiceResponse) CopyFrom(src interface{}) { *m = *o if o.Service != nil { m.Service = &Service{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Service, o.Service) + deepcopy.Copy(m.Service, o.Service) } } @@ -1527,7 +1524,7 @@ func (m *ListServicesRequest) CopyFrom(src interface{}) { *m = *o if o.Filters != nil { m.Filters = &ListServicesRequest_Filters{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + deepcopy.Copy(m.Filters, o.Filters) } } @@ -1590,7 +1587,7 @@ func (m *ListServicesResponse) CopyFrom(src interface{}) { m.Services = make([]*Service, len(o.Services)) for i := range m.Services { m.Services[i] = &Service{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Services[i], o.Services[i]) + deepcopy.Copy(m.Services[i], o.Services[i]) } } @@ -1611,7 +1608,7 @@ func (m *CreateNetworkRequest) CopyFrom(src interface{}) { *m = *o if o.Spec != nil { m.Spec = &NetworkSpec{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + deepcopy.Copy(m.Spec, o.Spec) } } @@ -1630,7 +1627,7 @@ func (m *CreateNetworkResponse) CopyFrom(src interface{}) { *m = *o if o.Network 
!= nil { m.Network = &Network{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Network, o.Network) + deepcopy.Copy(m.Network, o.Network) } } @@ -1664,7 +1661,7 @@ func (m *GetNetworkResponse) CopyFrom(src interface{}) { *m = *o if o.Network != nil { m.Network = &Network{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Network, o.Network) + deepcopy.Copy(m.Network, o.Network) } } @@ -1708,7 +1705,7 @@ func (m *ListNetworksRequest) CopyFrom(src interface{}) { *m = *o if o.Filters != nil { m.Filters = &ListNetworksRequest_Filters{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + deepcopy.Copy(m.Filters, o.Filters) } } @@ -1766,7 +1763,7 @@ func (m *ListNetworksResponse) CopyFrom(src interface{}) { m.Networks = make([]*Network, len(o.Networks)) for i := range m.Networks { m.Networks[i] = &Network{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i]) + deepcopy.Copy(m.Networks[i], o.Networks[i]) } } @@ -1802,7 +1799,7 @@ func (m *GetClusterResponse) CopyFrom(src interface{}) { *m = *o if o.Cluster != nil { m.Cluster = &Cluster{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Cluster, o.Cluster) + deepcopy.Copy(m.Cluster, o.Cluster) } } @@ -1821,7 +1818,7 @@ func (m *ListClustersRequest) CopyFrom(src interface{}) { *m = *o if o.Filters != nil { m.Filters = &ListClustersRequest_Filters{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + deepcopy.Copy(m.Filters, o.Filters) } } @@ -1879,7 +1876,7 @@ func (m *ListClustersResponse) CopyFrom(src interface{}) { m.Clusters = make([]*Cluster, len(o.Clusters)) for i := range m.Clusters { m.Clusters[i] = &Cluster{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Clusters[i], o.Clusters[i]) + deepcopy.Copy(m.Clusters[i], o.Clusters[i]) } } @@ -1915,13 +1912,13 @@ func (m *UpdateClusterRequest) CopyFrom(src interface{}) { *m = *o if o.ClusterVersion != nil { m.ClusterVersion = &Version{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.ClusterVersion, o.ClusterVersion) + deepcopy.Copy(m.ClusterVersion, o.ClusterVersion) } if o.Spec != nil { m.Spec = &ClusterSpec{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + deepcopy.Copy(m.Spec, o.Spec) } - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Rotation, &o.Rotation) + deepcopy.Copy(&m.Rotation, &o.Rotation) } func (m *UpdateClusterResponse) Copy() *UpdateClusterResponse { @@ -1939,7 +1936,7 @@ func (m *UpdateClusterResponse) CopyFrom(src interface{}) { *m = *o if o.Cluster != nil { m.Cluster = &Cluster{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Cluster, o.Cluster) + deepcopy.Copy(m.Cluster, o.Cluster) } } @@ -1973,7 +1970,7 @@ func (m *GetSecretResponse) CopyFrom(src interface{}) { *m = *o if o.Secret != nil { m.Secret = &Secret{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret) + deepcopy.Copy(m.Secret, o.Secret) } } @@ -1992,11 +1989,11 @@ func (m *UpdateSecretRequest) CopyFrom(src interface{}) { *m = *o if o.SecretVersion != nil { m.SecretVersion = &Version{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.SecretVersion, o.SecretVersion) + deepcopy.Copy(m.SecretVersion, o.SecretVersion) } if o.Spec != nil { m.Spec = &SecretSpec{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + deepcopy.Copy(m.Spec, o.Spec) } } @@ -2015,7 +2012,7 @@ func (m *UpdateSecretResponse) CopyFrom(src interface{}) { *m = *o if o.Secret != nil { m.Secret = &Secret{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret) + deepcopy.Copy(m.Secret, o.Secret) } } @@ -2034,7 +2031,7 @@ func (m 
*ListSecretsRequest) CopyFrom(src interface{}) { *m = *o if o.Filters != nil { m.Filters = &ListSecretsRequest_Filters{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + deepcopy.Copy(m.Filters, o.Filters) } } @@ -2092,7 +2089,7 @@ func (m *ListSecretsResponse) CopyFrom(src interface{}) { m.Secrets = make([]*Secret, len(o.Secrets)) for i := range m.Secrets { m.Secrets[i] = &Secret{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + deepcopy.Copy(m.Secrets[i], o.Secrets[i]) } } @@ -2113,7 +2110,7 @@ func (m *CreateSecretRequest) CopyFrom(src interface{}) { *m = *o if o.Spec != nil { m.Spec = &SecretSpec{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + deepcopy.Copy(m.Spec, o.Spec) } } @@ -2132,7 +2129,7 @@ func (m *CreateSecretResponse) CopyFrom(src interface{}) { *m = *o if o.Secret != nil { m.Secret = &Secret{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret) + deepcopy.Copy(m.Secret, o.Secret) } } @@ -2191,7 +2188,7 @@ func (m *GetConfigResponse) CopyFrom(src interface{}) { *m = *o if o.Config != nil { m.Config = &Config{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config) + deepcopy.Copy(m.Config, o.Config) } } @@ -2210,11 +2207,11 @@ func (m *UpdateConfigRequest) CopyFrom(src interface{}) { *m = *o if o.ConfigVersion != nil { m.ConfigVersion = &Version{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.ConfigVersion, o.ConfigVersion) + deepcopy.Copy(m.ConfigVersion, o.ConfigVersion) } if o.Spec != nil { m.Spec = &ConfigSpec{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + deepcopy.Copy(m.Spec, o.Spec) } } @@ -2233,7 +2230,7 @@ func (m *UpdateConfigResponse) CopyFrom(src interface{}) { *m = *o if o.Config != nil { m.Config = &Config{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config) + deepcopy.Copy(m.Config, o.Config) } } @@ -2252,7 +2249,7 @@ func (m *ListConfigsRequest) CopyFrom(src interface{}) { *m = *o if o.Filters != nil { m.Filters = &ListConfigsRequest_Filters{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + deepcopy.Copy(m.Filters, o.Filters) } } @@ -2310,7 +2307,7 @@ func (m *ListConfigsResponse) CopyFrom(src interface{}) { m.Configs = make([]*Config, len(o.Configs)) for i := range m.Configs { m.Configs[i] = &Config{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Configs[i], o.Configs[i]) + deepcopy.Copy(m.Configs[i], o.Configs[i]) } } @@ -2331,7 +2328,7 @@ func (m *CreateConfigRequest) CopyFrom(src interface{}) { *m = *o if o.Spec != nil { m.Spec = &ConfigSpec{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + deepcopy.Copy(m.Spec, o.Spec) } } @@ -2350,7 +2347,7 @@ func (m *CreateConfigResponse) CopyFrom(src interface{}) { *m = *o if o.Config != nil { m.Config = &Config{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config) + deepcopy.Copy(m.Config, o.Config) } } @@ -5822,24 +5819,6 @@ func (m *RemoveConfigResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Control(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Control(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - 
dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintControl(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -5858,12 +5837,12 @@ type raftProxyControlServer struct { func NewRaftProxyControlServer(local ControlServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) ControlServer { redirectChecker := func(ctx context.Context) (context.Context, error) { - s, ok := transport.StreamFromContext(ctx) + p, ok := peer.FromContext(ctx) if !ok { return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") } - addr := s.ServerTransport().RemoteAddr().String() - md, ok := metadata.FromContext(ctx) + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } @@ -5871,7 +5850,7 @@ func NewRaftProxyControlServer(local ControlServer, connSelector raftselector.Co md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) - return metadata.NewContext(ctx, md), nil + return metadata.NewOutgoingContext(ctx, md), nil } remoteMods := []func(context.Context) (context.Context, error){redirectChecker} remoteMods = append(remoteMods, remoteCtxMod) @@ -7898,7 +7877,7 @@ func (this *ListNodesRequest_Filters) String() string { for k, _ := range this.Labels { keysForLabels = append(keysForLabels, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + sortkeys.Strings(keysForLabels) mapStringForLabels := "map[string]string{" for _, k := range keysForLabels { mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) @@ -8024,7 +8003,7 @@ func (this *ListTasksRequest_Filters) String() string { for k, _ := range this.Labels { keysForLabels = append(keysForLabels, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + sortkeys.Strings(keysForLabels) mapStringForLabels := "map[string]string{" for _, k := range keysForLabels { mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) @@ -8155,7 +8134,7 @@ func (this *ListServicesRequest_Filters) String() string { for k, _ := range this.Labels { keysForLabels = append(keysForLabels, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + sortkeys.Strings(keysForLabels) mapStringForLabels := "map[string]string{" for _, k := range keysForLabels { mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) @@ -8260,7 +8239,7 @@ func (this *ListNetworksRequest_Filters) String() string { for k, _ := range this.Labels { keysForLabels = append(keysForLabels, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + sortkeys.Strings(keysForLabels) mapStringForLabels := "map[string]string{" for _, k := range keysForLabels { mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) @@ -8323,7 +8302,7 @@ func (this *ListClustersRequest_Filters) String() string { for k, _ := range this.Labels { keysForLabels = append(keysForLabels, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + sortkeys.Strings(keysForLabels) mapStringForLabels := "map[string]string{" for _, k := range keysForLabels { mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) @@ -8443,7 +8422,7 @@ func (this *ListSecretsRequest_Filters) String() string { for k, _ := range this.Labels { keysForLabels = append(keysForLabels, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + 
sortkeys.Strings(keysForLabels) mapStringForLabels := "map[string]string{" for _, k := range keysForLabels { mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) @@ -8567,7 +8546,7 @@ func (this *ListConfigsRequest_Filters) String() string { for k, _ := range this.Labels { keysForLabels = append(keysForLabels, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + sortkeys.Strings(keysForLabels) mapStringForLabels := "map[string]string{" for _, k := range keysForLabels { mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) @@ -8997,51 +8976,14 @@ func (m *ListNodesRequest_Filters) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -9051,41 +8993,80 @@ func (m *ListNodesRequest_Filters) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType == 0 { @@ -10206,51 +10187,14 @@ func (m *ListTasksRequest_Filters) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -10260,41 +10204,80 @@ func (m *ListTasksRequest_Filters) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 
{ + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -11516,51 +11499,14 @@ func (m *ListServicesRequest_Filters) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -11570,41 +11516,80 @@ func (m *ListServicesRequest_Filters) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if 
fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -12477,51 +12462,14 @@ func (m *ListNetworksRequest_Filters) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if m.Labels == nil { + m.Labels = make(map[string]string) } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Labels == nil { - m.Labels = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -12531,41 +12479,80 @@ func (m *ListNetworksRequest_Filters) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return 
ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -13056,51 +13043,14 @@ func (m *ListClustersRequest_Filters) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -13110,41 +13060,80 @@ func (m *ListClustersRequest_Filters) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -14231,51 +14220,14 @@ func (m *ListSecretsRequest_Filters) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -14285,41 +14237,80 @@ func (m *ListSecretsRequest_Filters) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { 
break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -15333,51 +15324,14 @@ func (m *ListConfigsRequest_Filters) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); 
; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -15387,41 +15341,80 @@ func (m *ListConfigsRequest_Filters) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { diff --git a/vendor/github.com/docker/swarmkit/api/deepcopy/copy.go b/vendor/github.com/docker/swarmkit/api/deepcopy/copy.go index 64e370ca8..8880bd606 100644 --- a/vendor/github.com/docker/swarmkit/api/deepcopy/copy.go +++ b/vendor/github.com/docker/swarmkit/api/deepcopy/copy.go @@ -44,6 +44,9 @@ func Copy(dst, src interface{}) { case *types.Timestamp: src := src.(*types.Timestamp) *dst = *src + case *types.BoolValue: + src := src.(*types.BoolValue) + *dst = *src case CopierFrom: dst.CopyFrom(src) default: diff --git a/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go b/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go index 120df8811..cc443848d 100644 --- a/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go +++ b/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: github.com/docker/swarmkit/api/dispatcher.proto -// DO NOT EDIT! package api @@ -13,20 +12,18 @@ import _ "github.com/gogo/protobuf/types" import time "time" -import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" +import deepcopy "github.com/docker/swarmkit/api/deepcopy" -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" -import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" +import types "github.com/gogo/protobuf/types" import raftselector "github.com/docker/swarmkit/manager/raftselector" import codes "google.golang.org/grpc/codes" import status "google.golang.org/grpc/status" import metadata "google.golang.org/grpc/metadata" -import transport "google.golang.org/grpc/transport" +import peer "google.golang.org/grpc/peer" import rafttime "time" import strings "strings" @@ -511,7 +508,7 @@ func (m *SessionRequest) CopyFrom(src interface{}) { *m = *o if o.Description != nil { m.Description = &NodeDescription{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Description, o.Description) + deepcopy.Copy(m.Description, o.Description) } } @@ -530,13 +527,13 @@ func (m *SessionMessage) CopyFrom(src interface{}) { *m = *o if o.Node != nil { m.Node = &Node{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node) + deepcopy.Copy(m.Node, o.Node) } if o.Managers != nil { m.Managers = make([]*WeightedPeer, len(o.Managers)) for i := range m.Managers { m.Managers[i] = &WeightedPeer{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Managers[i], o.Managers[i]) + deepcopy.Copy(m.Managers[i], o.Managers[i]) } } @@ -544,7 +541,7 @@ func (m *SessionMessage) CopyFrom(src interface{}) { m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys)) for i := range m.NetworkBootstrapKeys { m.NetworkBootstrapKeys[i] = &EncryptionKey{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i]) + deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i]) } } @@ -582,7 +579,7 @@ func (m *HeartbeatResponse) CopyFrom(src interface{}) { o := src.(*HeartbeatResponse) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Period, &o.Period) + deepcopy.Copy(&m.Period, &o.Period) } func (m *UpdateTaskStatusRequest) Copy() *UpdateTaskStatusRequest { @@ -602,7 +599,7 @@ func (m *UpdateTaskStatusRequest) CopyFrom(src interface{}) { m.Updates = make([]*UpdateTaskStatusRequest_TaskStatusUpdate, len(o.Updates)) for i := range m.Updates { m.Updates[i] = &UpdateTaskStatusRequest_TaskStatusUpdate{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Updates[i], o.Updates[i]) + deepcopy.Copy(m.Updates[i], o.Updates[i]) } } @@ -623,7 +620,7 @@ func (m *UpdateTaskStatusRequest_TaskStatusUpdate) CopyFrom(src interface{}) { *m = *o if o.Status != nil { m.Status = &TaskStatus{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Status, o.Status) + deepcopy.Copy(m.Status, o.Status) } } @@ -669,7 +666,7 @@ func (m *TasksMessage) CopyFrom(src interface{}) { m.Tasks = make([]*Task, len(o.Tasks)) for i := range m.Tasks { m.Tasks[i] = &Task{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + deepcopy.Copy(m.Tasks[i], o.Tasks[i]) } } @@ -709,19 +706,19 @@ func (m *Assignment) CopyFrom(src interface{}) { v := Assignment_Task{ Task: &Task{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Task, o.GetTask()) + deepcopy.Copy(v.Task, o.GetTask()) m.Item = &v case *Assignment_Secret: v 
:= Assignment_Secret{ Secret: &Secret{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Secret, o.GetSecret()) + deepcopy.Copy(v.Secret, o.GetSecret()) m.Item = &v case *Assignment_Config: v := Assignment_Config{ Config: &Config{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig()) + deepcopy.Copy(v.Config, o.GetConfig()) m.Item = &v } } @@ -743,7 +740,7 @@ func (m *AssignmentChange) CopyFrom(src interface{}) { *m = *o if o.Assignment != nil { m.Assignment = &Assignment{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Assignment, o.Assignment) + deepcopy.Copy(m.Assignment, o.Assignment) } } @@ -764,7 +761,7 @@ func (m *AssignmentsMessage) CopyFrom(src interface{}) { m.Changes = make([]*AssignmentChange, len(o.Changes)) for i := range m.Changes { m.Changes[i] = &AssignmentChange{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Changes[i], o.Changes[i]) + deepcopy.Copy(m.Changes[i], o.Changes[i]) } } @@ -1243,8 +1240,8 @@ func (m *HeartbeatResponse) MarshalTo(dAtA []byte) (int, error) { _ = l dAtA[i] = 0xa i++ - i = encodeVarintDispatcher(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(m.Period))) - n3, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Period, dAtA[i:]) + i = encodeVarintDispatcher(dAtA, i, uint64(types.SizeOfStdDuration(m.Period))) + n3, err := types.StdDurationMarshalTo(m.Period, dAtA[i:]) if err != nil { return 0, err } @@ -1565,24 +1562,6 @@ func (m *AssignmentsMessage) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Dispatcher(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Dispatcher(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintDispatcher(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1601,12 +1580,12 @@ type raftProxyDispatcherServer struct { func NewRaftProxyDispatcherServer(local DispatcherServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) DispatcherServer { redirectChecker := func(ctx context.Context) (context.Context, error) { - s, ok := transport.StreamFromContext(ctx) + p, ok := peer.FromContext(ctx) if !ok { return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") } - addr := s.ServerTransport().RemoteAddr().String() - md, ok := metadata.FromContext(ctx) + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } @@ -1614,7 +1593,7 @@ func NewRaftProxyDispatcherServer(local DispatcherServer, connSelector raftselec md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) - return metadata.NewContext(ctx, md), nil + return metadata.NewOutgoingContext(ctx, md), nil } remoteMods := []func(context.Context) (context.Context, error){redirectChecker} remoteMods = append(remoteMods, remoteCtxMod) @@ -1945,7 +1924,7 @@ func (m *HeartbeatRequest) Size() (n int) { func (m *HeartbeatResponse) Size() (n 
int) { var l int _ = l - l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Period) + l = types.SizeOfStdDuration(m.Period) n += 1 + l + sovDispatcher(uint64(l)) return n } @@ -2732,7 +2711,7 @@ func (m *HeartbeatResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Period, dAtA[iNdEx:postIndex]); err != nil { + if err := types.StdDurationUnmarshal(&m.Period, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/vendor/github.com/docker/swarmkit/api/health.pb.go b/vendor/github.com/docker/swarmkit/api/health.pb.go index ed7df7305..453e01fc3 100644 --- a/vendor/github.com/docker/swarmkit/api/health.pb.go +++ b/vendor/github.com/docker/swarmkit/api/health.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/docker/swarmkit/api/health.proto -// DO NOT EDIT! package api @@ -10,16 +9,14 @@ import math "math" import _ "github.com/gogo/protobuf/gogoproto" import _ "github.com/docker/swarmkit/protobuf/plugin" -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" import raftselector "github.com/docker/swarmkit/manager/raftselector" import codes "google.golang.org/grpc/codes" import status "google.golang.org/grpc/status" import metadata "google.golang.org/grpc/metadata" -import transport "google.golang.org/grpc/transport" +import peer "google.golang.org/grpc/peer" import rafttime "time" import strings "strings" @@ -249,24 +246,6 @@ func (m *HealthCheckResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Health(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Health(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintHealth(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -285,12 +264,12 @@ type raftProxyHealthServer struct { func NewRaftProxyHealthServer(local HealthServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) HealthServer { redirectChecker := func(ctx context.Context) (context.Context, error) { - s, ok := transport.StreamFromContext(ctx) + p, ok := peer.FromContext(ctx) if !ok { return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") } - addr := s.ServerTransport().RemoteAddr().String() - md, ok := metadata.FromContext(ctx) + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } @@ -298,7 +277,7 @@ func NewRaftProxyHealthServer(local HealthServer, connSelector raftselector.Conn md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) - return metadata.NewContext(ctx, md), nil + return metadata.NewOutgoingContext(ctx, md), nil } remoteMods := 
[]func(context.Context) (context.Context, error){redirectChecker} remoteMods = append(remoteMods, remoteCtxMod) diff --git a/vendor/github.com/docker/swarmkit/api/logbroker.pb.go b/vendor/github.com/docker/swarmkit/api/logbroker.pb.go index 1108088fb..b6231e941 100644 --- a/vendor/github.com/docker/swarmkit/api/logbroker.pb.go +++ b/vendor/github.com/docker/swarmkit/api/logbroker.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/docker/swarmkit/api/logbroker.proto -// DO NOT EDIT! package api @@ -11,18 +10,16 @@ import _ "github.com/gogo/protobuf/gogoproto" import google_protobuf "github.com/gogo/protobuf/types" import _ "github.com/docker/swarmkit/protobuf/plugin" -import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" +import deepcopy "github.com/docker/swarmkit/api/deepcopy" -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" import raftselector "github.com/docker/swarmkit/manager/raftselector" import codes "google.golang.org/grpc/codes" import status "google.golang.org/grpc/status" import metadata "google.golang.org/grpc/metadata" -import transport "google.golang.org/grpc/transport" +import peer "google.golang.org/grpc/peer" import rafttime "time" import strings "strings" @@ -305,7 +302,7 @@ func (m *LogSubscriptionOptions) CopyFrom(src interface{}) { if o.Since != nil { m.Since = &google_protobuf.Timestamp{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Since, o.Since) + deepcopy.Copy(m.Since, o.Since) } } @@ -382,10 +379,10 @@ func (m *LogMessage) CopyFrom(src interface{}) { o := src.(*LogMessage) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Context, &o.Context) + deepcopy.Copy(&m.Context, &o.Context) if o.Timestamp != nil { m.Timestamp = &google_protobuf.Timestamp{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Timestamp, o.Timestamp) + deepcopy.Copy(m.Timestamp, o.Timestamp) } if o.Data != nil { m.Data = make([]byte, len(o.Data)) @@ -394,7 +391,7 @@ func (m *LogMessage) CopyFrom(src interface{}) { if o.Attrs != nil { m.Attrs = make([]LogAttr, len(o.Attrs)) for i := range m.Attrs { - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Attrs[i], &o.Attrs[i]) + deepcopy.Copy(&m.Attrs[i], &o.Attrs[i]) } } @@ -415,11 +412,11 @@ func (m *SubscribeLogsRequest) CopyFrom(src interface{}) { *m = *o if o.Selector != nil { m.Selector = &LogSelector{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Selector, o.Selector) + deepcopy.Copy(m.Selector, o.Selector) } if o.Options != nil { m.Options = &LogSubscriptionOptions{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Options, o.Options) + deepcopy.Copy(m.Options, o.Options) } } @@ -439,7 +436,7 @@ func (m *SubscribeLogsMessage) CopyFrom(src interface{}) { if o.Messages != nil { m.Messages = make([]LogMessage, len(o.Messages)) for i := range m.Messages { - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Messages[i], &o.Messages[i]) + deepcopy.Copy(&m.Messages[i], &o.Messages[i]) } } @@ -470,11 +467,11 @@ func (m *SubscriptionMessage) CopyFrom(src interface{}) { *m = *o if o.Selector != nil { m.Selector = &LogSelector{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Selector, o.Selector) + deepcopy.Copy(m.Selector, o.Selector) } if o.Options != nil { m.Options = &LogSubscriptionOptions{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Options, o.Options) + deepcopy.Copy(m.Options, o.Options) } } @@ 
-494,7 +491,7 @@ func (m *PublishLogsMessage) CopyFrom(src interface{}) { if o.Messages != nil { m.Messages = make([]LogMessage, len(o.Messages)) for i := range m.Messages { - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Messages[i], &o.Messages[i]) + deepcopy.Copy(&m.Messages[i], &o.Messages[i]) } } @@ -1236,24 +1233,6 @@ func (m *PublishLogsResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Logbroker(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Logbroker(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintLogbroker(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1272,12 +1251,12 @@ type raftProxyLogsServer struct { func NewRaftProxyLogsServer(local LogsServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) LogsServer { redirectChecker := func(ctx context.Context) (context.Context, error) { - s, ok := transport.StreamFromContext(ctx) + p, ok := peer.FromContext(ctx) if !ok { return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") } - addr := s.ServerTransport().RemoteAddr().String() - md, ok := metadata.FromContext(ctx) + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } @@ -1285,7 +1264,7 @@ func NewRaftProxyLogsServer(local LogsServer, connSelector raftselector.ConnProv md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) - return metadata.NewContext(ctx, md), nil + return metadata.NewOutgoingContext(ctx, md), nil } remoteMods := []func(context.Context) (context.Context, error){redirectChecker} remoteMods = append(remoteMods, remoteCtxMod) @@ -1395,12 +1374,12 @@ type raftProxyLogBrokerServer struct { func NewRaftProxyLogBrokerServer(local LogBrokerServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) LogBrokerServer { redirectChecker := func(ctx context.Context) (context.Context, error) { - s, ok := transport.StreamFromContext(ctx) + p, ok := peer.FromContext(ctx) if !ok { return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") } - addr := s.ServerTransport().RemoteAddr().String() - md, ok := metadata.FromContext(ctx) + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } @@ -1408,7 +1387,7 @@ func NewRaftProxyLogBrokerServer(local LogBrokerServer, connSelector raftselecto md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) - return metadata.NewContext(ctx, md), nil + return metadata.NewOutgoingContext(ctx, md), nil } remoteMods := []func(context.Context) (context.Context, error){redirectChecker} remoteMods = append(remoteMods, remoteCtxMod) diff --git 
a/vendor/github.com/docker/swarmkit/api/objects.pb.go b/vendor/github.com/docker/swarmkit/api/objects.pb.go index 01abbe507..e7c95c517 100644 --- a/vendor/github.com/docker/swarmkit/api/objects.pb.go +++ b/vendor/github.com/docker/swarmkit/api/objects.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/docker/swarmkit/api/objects.proto -// DO NOT EDIT! package api @@ -12,13 +11,13 @@ import _ "github.com/gogo/protobuf/gogoproto" import google_protobuf3 "github.com/gogo/protobuf/types" import _ "github.com/docker/swarmkit/protobuf/plugin" -import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" +import deepcopy "github.com/docker/swarmkit/api/deepcopy" -import github_com_docker_go_events "github.com/docker/go-events" +import go_events "github.com/docker/go-events" import strings "strings" import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" +import sortkeys "github.com/gogo/protobuf/sortkeys" import io "io" @@ -57,7 +56,7 @@ type Node struct { // ManagerStatus provides the current status of the node's manager // component, if the node is a manager. ManagerStatus *ManagerStatus `protobuf:"bytes,6,opt,name=manager_status,json=managerStatus" json:"manager_status,omitempty"` - // DEPRECATED: Use lb_attachments to find the ingress network + // DEPRECATED: Use Attachments to find the ingress network // The node attachment to the ingress network. Attachment *NetworkAttachment `protobuf:"bytes,7,opt,name=attachment" json:"attachment,omitempty"` // Certificate is the TLS certificate issued for the node, if any. @@ -270,6 +269,11 @@ type Cluster struct { // If the key is empty, the node will be unlocked (will not require a key // to start up from a shut down state). UnlockKeys []*EncryptionKey `protobuf:"bytes,9,rep,name=unlock_keys,json=unlockKeys" json:"unlock_keys,omitempty"` + // FIPS specifies whether this cluster should be in FIPS mode. This changes + // the format of the join tokens, and nodes that are not FIPS-enabled should + // reject joining the cluster. Nodes that report themselves to be non-FIPS + // should be rejected from the cluster. 
+ FIPS bool `protobuf:"varint,10,opt,name=fips,proto3" json:"fips,omitempty"` } func (m *Cluster) Reset() { *m = Cluster{} } @@ -368,14 +372,14 @@ func (m *Meta) CopyFrom(src interface{}) { o := src.(*Meta) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Version, &o.Version) + deepcopy.Copy(&m.Version, &o.Version) if o.CreatedAt != nil { m.CreatedAt = &google_protobuf.Timestamp{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.CreatedAt, o.CreatedAt) + deepcopy.Copy(m.CreatedAt, o.CreatedAt) } if o.UpdatedAt != nil { m.UpdatedAt = &google_protobuf.Timestamp{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.UpdatedAt, o.UpdatedAt) + deepcopy.Copy(m.UpdatedAt, o.UpdatedAt) } } @@ -392,27 +396,27 @@ func (m *Node) CopyFrom(src interface{}) { o := src.(*Node) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec) + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) if o.Description != nil { m.Description = &NodeDescription{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Description, o.Description) + deepcopy.Copy(m.Description, o.Description) } - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Status, &o.Status) + deepcopy.Copy(&m.Status, &o.Status) if o.ManagerStatus != nil { m.ManagerStatus = &ManagerStatus{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.ManagerStatus, o.ManagerStatus) + deepcopy.Copy(m.ManagerStatus, o.ManagerStatus) } if o.Attachment != nil { m.Attachment = &NetworkAttachment{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Attachment, o.Attachment) + deepcopy.Copy(m.Attachment, o.Attachment) } - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Certificate, &o.Certificate) + deepcopy.Copy(&m.Certificate, &o.Certificate) if o.Attachments != nil { m.Attachments = make([]*NetworkAttachment, len(o.Attachments)) for i := range m.Attachments { m.Attachments[i] = &NetworkAttachment{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Attachments[i], o.Attachments[i]) + deepcopy.Copy(m.Attachments[i], o.Attachments[i]) } } @@ -431,27 +435,27 @@ func (m *Service) CopyFrom(src interface{}) { o := src.(*Service) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec) + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) if o.SpecVersion != nil { m.SpecVersion = &Version{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.SpecVersion, o.SpecVersion) + deepcopy.Copy(m.SpecVersion, o.SpecVersion) } if o.PreviousSpec != nil { m.PreviousSpec = &ServiceSpec{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.PreviousSpec, o.PreviousSpec) + deepcopy.Copy(m.PreviousSpec, o.PreviousSpec) } if o.PreviousSpecVersion != nil { m.PreviousSpecVersion = &Version{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.PreviousSpecVersion, o.PreviousSpecVersion) + deepcopy.Copy(m.PreviousSpecVersion, o.PreviousSpecVersion) } if o.Endpoint != nil { m.Endpoint = &Endpoint{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Endpoint, o.Endpoint) + deepcopy.Copy(m.Endpoint, o.Endpoint) } if o.UpdateStatus != nil { m.UpdateStatus = &UpdateStatus{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.UpdateStatus, o.UpdateStatus) + deepcopy.Copy(m.UpdateStatus, o.UpdateStatus) } } @@ -470,13 +474,13 @@ func (m *Endpoint) CopyFrom(src interface{}) { *m = *o if o.Spec != nil { m.Spec = &EndpointSpec{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + deepcopy.Copy(m.Spec, o.Spec) } if o.Ports != nil 
{ m.Ports = make([]*PortConfig, len(o.Ports)) for i := range m.Ports { m.Ports[i] = &PortConfig{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Ports[i], o.Ports[i]) + deepcopy.Copy(m.Ports[i], o.Ports[i]) } } @@ -484,7 +488,7 @@ func (m *Endpoint) CopyFrom(src interface{}) { m.VirtualIPs = make([]*Endpoint_VirtualIP, len(o.VirtualIPs)) for i := range m.VirtualIPs { m.VirtualIPs[i] = &Endpoint_VirtualIP{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.VirtualIPs[i], o.VirtualIPs[i]) + deepcopy.Copy(m.VirtualIPs[i], o.VirtualIPs[i]) } } @@ -518,36 +522,36 @@ func (m *Task) CopyFrom(src interface{}) { o := src.(*Task) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec) + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) if o.SpecVersion != nil { m.SpecVersion = &Version{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.SpecVersion, o.SpecVersion) + deepcopy.Copy(m.SpecVersion, o.SpecVersion) } - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.ServiceAnnotations, &o.ServiceAnnotations) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Status, &o.Status) + deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.ServiceAnnotations, &o.ServiceAnnotations) + deepcopy.Copy(&m.Status, &o.Status) if o.Networks != nil { m.Networks = make([]*NetworkAttachment, len(o.Networks)) for i := range m.Networks { m.Networks[i] = &NetworkAttachment{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i]) + deepcopy.Copy(m.Networks[i], o.Networks[i]) } } if o.Endpoint != nil { m.Endpoint = &Endpoint{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Endpoint, o.Endpoint) + deepcopy.Copy(m.Endpoint, o.Endpoint) } if o.LogDriver != nil { m.LogDriver = &Driver{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.LogDriver, o.LogDriver) + deepcopy.Copy(m.LogDriver, o.LogDriver) } if o.AssignedGenericResources != nil { m.AssignedGenericResources = make([]*GenericResource, len(o.AssignedGenericResources)) for i := range m.AssignedGenericResources { m.AssignedGenericResources[i] = &GenericResource{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.AssignedGenericResources[i], o.AssignedGenericResources[i]) + deepcopy.Copy(m.AssignedGenericResources[i], o.AssignedGenericResources[i]) } } @@ -568,7 +572,7 @@ func (m *NetworkAttachment) CopyFrom(src interface{}) { *m = *o if o.Network != nil { m.Network = &Network{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Network, o.Network) + deepcopy.Copy(m.Network, o.Network) } if o.Addresses != nil { m.Addresses = make([]string, len(o.Addresses)) @@ -602,15 +606,15 @@ func (m *Network) CopyFrom(src interface{}) { o := src.(*Network) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec) + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) if o.DriverState != nil { m.DriverState = &Driver{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.DriverState, o.DriverState) + deepcopy.Copy(m.DriverState, o.DriverState) } if o.IPAM != nil { m.IPAM = &IPAMOptions{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.IPAM, o.IPAM) + deepcopy.Copy(m.IPAM, o.IPAM) } } @@ -627,14 +631,14 @@ func (m *Cluster) CopyFrom(src interface{}) { o := src.(*Cluster) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, 
&o.Spec) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.RootCA, &o.RootCA) + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + deepcopy.Copy(&m.RootCA, &o.RootCA) if o.NetworkBootstrapKeys != nil { m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys)) for i := range m.NetworkBootstrapKeys { m.NetworkBootstrapKeys[i] = &EncryptionKey{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i]) + deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i]) } } @@ -642,7 +646,7 @@ func (m *Cluster) CopyFrom(src interface{}) { m.BlacklistedCertificates = make(map[string]*BlacklistedCertificate, len(o.BlacklistedCertificates)) for k, v := range o.BlacklistedCertificates { m.BlacklistedCertificates[k] = &BlacklistedCertificate{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.BlacklistedCertificates[k], v) + deepcopy.Copy(m.BlacklistedCertificates[k], v) } } @@ -650,7 +654,7 @@ func (m *Cluster) CopyFrom(src interface{}) { m.UnlockKeys = make([]*EncryptionKey, len(o.UnlockKeys)) for i := range m.UnlockKeys { m.UnlockKeys[i] = &EncryptionKey{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.UnlockKeys[i], o.UnlockKeys[i]) + deepcopy.Copy(m.UnlockKeys[i], o.UnlockKeys[i]) } } @@ -669,8 +673,8 @@ func (m *Secret) CopyFrom(src interface{}) { o := src.(*Secret) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec) + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) } func (m *Config) Copy() *Config { @@ -686,8 +690,8 @@ func (m *Config) CopyFrom(src interface{}) { o := src.(*Config) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec) + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) } func (m *Resource) Copy() *Resource { @@ -703,11 +707,11 @@ func (m *Resource) CopyFrom(src interface{}) { o := src.(*Resource) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Annotations, &o.Annotations) if o.Payload != nil { m.Payload = &google_protobuf3.Any{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Payload, o.Payload) + deepcopy.Copy(m.Payload, o.Payload) } } @@ -724,8 +728,8 @@ func (m *Extension) CopyFrom(src interface{}) { o := src.(*Extension) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Annotations, &o.Annotations) } func (m *Meta) Marshal() (dAtA []byte, err error) { @@ -1426,6 +1430,16 @@ func (m *Cluster) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.FIPS { + dAtA[i] = 0x50 + i++ + if m.FIPS { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -1621,24 +1635,6 @@ func (m *Extension) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Objects(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Objects(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = 
uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintObjects(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1925,6 +1921,9 @@ func (m *Cluster) Size() (n int) { n += 1 + l + sovObjects(uint64(l)) } } + if m.FIPS { + n += 2 + } return n } @@ -2020,7 +2019,7 @@ type EventCreateNode struct { Checks []NodeCheckFunc } -func (e EventCreateNode) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventCreateNode) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventCreateNode) if !ok { return false @@ -2040,7 +2039,7 @@ type EventUpdateNode struct { Checks []NodeCheckFunc } -func (e EventUpdateNode) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventUpdateNode) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventUpdateNode) if !ok { return false @@ -2059,7 +2058,7 @@ type EventDeleteNode struct { Checks []NodeCheckFunc } -func (e EventDeleteNode) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventDeleteNode) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventDeleteNode) if !ok { return false @@ -2267,7 +2266,7 @@ type EventCreateService struct { Checks []ServiceCheckFunc } -func (e EventCreateService) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventCreateService) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventCreateService) if !ok { return false @@ -2287,7 +2286,7 @@ type EventUpdateService struct { Checks []ServiceCheckFunc } -func (e EventUpdateService) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventUpdateService) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventUpdateService) if !ok { return false @@ -2306,7 +2305,7 @@ type EventDeleteService struct { Checks []ServiceCheckFunc } -func (e EventDeleteService) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventDeleteService) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventDeleteService) if !ok { return false @@ -2484,7 +2483,7 @@ type EventCreateTask struct { Checks []TaskCheckFunc } -func (e EventCreateTask) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventCreateTask) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventCreateTask) if !ok { return false @@ -2504,7 +2503,7 @@ type EventUpdateTask struct { Checks []TaskCheckFunc } -func (e EventUpdateTask) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventUpdateTask) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventUpdateTask) if !ok { return false @@ -2523,7 +2522,7 @@ type EventDeleteTask struct { Checks []TaskCheckFunc } -func (e EventDeleteTask) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventDeleteTask) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventDeleteTask) if !ok { return false @@ -2744,7 +2743,7 @@ type EventCreateNetwork struct { Checks []NetworkCheckFunc } -func (e EventCreateNetwork) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventCreateNetwork) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventCreateNetwork) if !ok { return false @@ -2764,7 +2763,7 @@ type EventUpdateNetwork struct { Checks []NetworkCheckFunc } -func (e EventUpdateNetwork) Matches(apiEvent 
github_com_docker_go_events.Event) bool { +func (e EventUpdateNetwork) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventUpdateNetwork) if !ok { return false @@ -2783,7 +2782,7 @@ type EventDeleteNetwork struct { Checks []NetworkCheckFunc } -func (e EventDeleteNetwork) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventDeleteNetwork) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventDeleteNetwork) if !ok { return false @@ -2961,7 +2960,7 @@ type EventCreateCluster struct { Checks []ClusterCheckFunc } -func (e EventCreateCluster) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventCreateCluster) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventCreateCluster) if !ok { return false @@ -2981,7 +2980,7 @@ type EventUpdateCluster struct { Checks []ClusterCheckFunc } -func (e EventUpdateCluster) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventUpdateCluster) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventUpdateCluster) if !ok { return false @@ -3000,7 +2999,7 @@ type EventDeleteCluster struct { Checks []ClusterCheckFunc } -func (e EventDeleteCluster) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventDeleteCluster) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventDeleteCluster) if !ok { return false @@ -3178,7 +3177,7 @@ type EventCreateSecret struct { Checks []SecretCheckFunc } -func (e EventCreateSecret) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventCreateSecret) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventCreateSecret) if !ok { return false @@ -3198,7 +3197,7 @@ type EventUpdateSecret struct { Checks []SecretCheckFunc } -func (e EventUpdateSecret) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventUpdateSecret) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventUpdateSecret) if !ok { return false @@ -3217,7 +3216,7 @@ type EventDeleteSecret struct { Checks []SecretCheckFunc } -func (e EventDeleteSecret) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventDeleteSecret) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventDeleteSecret) if !ok { return false @@ -3395,7 +3394,7 @@ type EventCreateConfig struct { Checks []ConfigCheckFunc } -func (e EventCreateConfig) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventCreateConfig) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventCreateConfig) if !ok { return false @@ -3415,7 +3414,7 @@ type EventUpdateConfig struct { Checks []ConfigCheckFunc } -func (e EventUpdateConfig) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventUpdateConfig) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventUpdateConfig) if !ok { return false @@ -3434,7 +3433,7 @@ type EventDeleteConfig struct { Checks []ConfigCheckFunc } -func (e EventDeleteConfig) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventDeleteConfig) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventDeleteConfig) if !ok { return false @@ -3612,7 +3611,7 @@ type EventCreateResource struct { Checks []ResourceCheckFunc } -func (e EventCreateResource) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventCreateResource) Matches(apiEvent go_events.Event) bool { typedEvent, ok := 
apiEvent.(EventCreateResource) if !ok { return false @@ -3632,7 +3631,7 @@ type EventUpdateResource struct { Checks []ResourceCheckFunc } -func (e EventUpdateResource) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventUpdateResource) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventUpdateResource) if !ok { return false @@ -3651,7 +3650,7 @@ type EventDeleteResource struct { Checks []ResourceCheckFunc } -func (e EventDeleteResource) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventDeleteResource) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventDeleteResource) if !ok { return false @@ -3835,7 +3834,7 @@ type EventCreateExtension struct { Checks []ExtensionCheckFunc } -func (e EventCreateExtension) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventCreateExtension) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventCreateExtension) if !ok { return false @@ -3855,7 +3854,7 @@ type EventUpdateExtension struct { Checks []ExtensionCheckFunc } -func (e EventUpdateExtension) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventUpdateExtension) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventUpdateExtension) if !ok { return false @@ -3874,7 +3873,7 @@ type EventDeleteExtension struct { Checks []ExtensionCheckFunc } -func (e EventDeleteExtension) Matches(apiEvent github_com_docker_go_events.Event) bool { +func (e EventDeleteExtension) Matches(apiEvent go_events.Event) bool { typedEvent, ok := apiEvent.(EventDeleteExtension) if !ok { return false @@ -4491,7 +4490,7 @@ func (this *NetworkAttachment) String() string { for k, _ := range this.DriverAttachmentOpts { keysForDriverAttachmentOpts = append(keysForDriverAttachmentOpts, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForDriverAttachmentOpts) + sortkeys.Strings(keysForDriverAttachmentOpts) mapStringForDriverAttachmentOpts := "map[string]string{" for _, k := range keysForDriverAttachmentOpts { mapStringForDriverAttachmentOpts += fmt.Sprintf("%v: %v,", k, this.DriverAttachmentOpts[k]) @@ -4528,7 +4527,7 @@ func (this *Cluster) String() string { for k, _ := range this.BlacklistedCertificates { keysForBlacklistedCertificates = append(keysForBlacklistedCertificates, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForBlacklistedCertificates) + sortkeys.Strings(keysForBlacklistedCertificates) mapStringForBlacklistedCertificates := "map[string]*BlacklistedCertificate{" for _, k := range keysForBlacklistedCertificates { mapStringForBlacklistedCertificates += fmt.Sprintf("%v: %v,", k, this.BlacklistedCertificates[k]) @@ -4543,6 +4542,7 @@ func (this *Cluster) String() string { `EncryptionKeyLamportClock:` + fmt.Sprintf("%v", this.EncryptionKeyLamportClock) + `,`, `BlacklistedCertificates:` + mapStringForBlacklistedCertificates + `,`, `UnlockKeys:` + strings.Replace(fmt.Sprintf("%v", this.UnlockKeys), "EncryptionKey", "EncryptionKey", 1) + `,`, + `FIPS:` + fmt.Sprintf("%v", this.FIPS) + `,`, `}`, }, "") return s @@ -6290,51 +6290,14 @@ func (m *NetworkAttachment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowObjects - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowObjects - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthObjects - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.DriverAttachmentOpts == nil { m.DriverAttachmentOpts = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowObjects @@ -6344,41 +6307,80 @@ func (m *NetworkAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowObjects + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthObjects - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.DriverAttachmentOpts[mapkey] = mapvalue - } else { - var mapvalue string - m.DriverAttachmentOpts[mapkey] = mapvalue } + m.DriverAttachmentOpts[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -6830,51 +6832,14 @@ func (m *Cluster) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowObjects - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowObjects - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthObjects - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.BlacklistedCertificates == nil { m.BlacklistedCertificates = make(map[string]*BlacklistedCertificate) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue *BlacklistedCertificate + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowObjects @@ -6884,46 +6849,85 @@ func (m *Cluster) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowObjects + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthObjects + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthObjects + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &BlacklistedCertificate{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthObjects - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthObjects - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &BlacklistedCertificate{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = 
postmsgIndex - m.BlacklistedCertificates[mapkey] = mapvalue - } else { - var mapvalue *BlacklistedCertificate - m.BlacklistedCertificates[mapkey] = mapvalue } + m.BlacklistedCertificates[mapkey] = mapvalue iNdEx = postIndex case 9: if wireType != 2 { @@ -6956,6 +6960,26 @@ func (m *Cluster) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FIPS", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FIPS = bool(v != 0) default: iNdEx = preIndex skippy, err := skipObjects(dAtA[iNdEx:]) @@ -7752,101 +7776,102 @@ var ( func init() { proto.RegisterFile("github.com/docker/swarmkit/api/objects.proto", fileDescriptorObjects) } var fileDescriptorObjects = []byte{ - // 1527 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcf, 0x6f, 0x1b, 0x4f, - 0x15, 0xef, 0xda, 0x1b, 0xff, 0x78, 0x4e, 0x4c, 0x98, 0x86, 0xb0, 0x35, 0xc1, 0x0e, 0xae, 0x40, - 0x55, 0x55, 0x39, 0x25, 0x14, 0x48, 0x03, 0xa5, 0xb5, 0x93, 0xa8, 0xb5, 0x4a, 0x69, 0x34, 0x2d, - 0x2d, 0xb7, 0x65, 0xb2, 0x3b, 0x75, 0x17, 0xaf, 0x77, 0x56, 0x3b, 0x63, 0x17, 0xdf, 0x7a, 0x0e, - 0x7f, 0x40, 0x6e, 0x1c, 0xfa, 0x37, 0x70, 0xe1, 0xc2, 0x81, 0x53, 0x8f, 0x9c, 0x10, 0xa7, 0x88, - 0xfa, 0xbf, 0x40, 0xe2, 0x80, 0x66, 0x76, 0xd6, 0xde, 0xc4, 0x9b, 0x5f, 0xa8, 0x8a, 0xbe, 0xa7, - 0xcc, 0xec, 0x7c, 0x3e, 0xef, 0xd7, 0xbc, 0xf7, 0xe6, 0xc5, 0x70, 0xaf, 0xe7, 0x89, 0xf7, 0xc3, - 0x83, 0x96, 0xc3, 0x06, 0x1b, 0x2e, 0x73, 0xfa, 0x34, 0xda, 0xe0, 0x1f, 0x48, 0x34, 0xe8, 0x7b, - 0x62, 0x83, 0x84, 0xde, 0x06, 0x3b, 0xf8, 0x03, 0x75, 0x04, 0x6f, 0x85, 0x11, 0x13, 0x0c, 0xa1, - 0x18, 0xd2, 0x4a, 0x20, 0xad, 0xd1, 0x8f, 0x6b, 0x77, 0x2f, 0x90, 0x20, 0xc6, 0x21, 0xd5, 0xfc, - 0x0b, 0xb1, 0x3c, 0xa4, 0x4e, 0x82, 0x6d, 0xf4, 0x18, 0xeb, 0xf9, 0x74, 0x43, 0xed, 0x0e, 0x86, - 0xef, 0x36, 0x84, 0x37, 0xa0, 0x5c, 0x90, 0x41, 0xa8, 0x01, 0x2b, 0x3d, 0xd6, 0x63, 0x6a, 0xb9, - 0x21, 0x57, 0xfa, 0xeb, 0xad, 0xd3, 0x34, 0x12, 0x8c, 0xf5, 0xd1, 0xcf, 0xcf, 0xd1, 0x3e, 0x85, - 0x87, 0xfe, 0xb0, 0xe7, 0x05, 0xfa, 0x4f, 0x4c, 0x6c, 0xfe, 0xd5, 0x00, 0xf3, 0x05, 0x15, 0x04, - 0xfd, 0x02, 0x8a, 0x23, 0x1a, 0x71, 0x8f, 0x05, 0x96, 0xb1, 0x6e, 0xdc, 0xa9, 0x6c, 0x7e, 0xaf, - 0x35, 0x1f, 0x91, 0xd6, 0x9b, 0x18, 0xd2, 0x31, 0x3f, 0x1f, 0x37, 0x6e, 0xe0, 0x84, 0x81, 0x1e, - 0x02, 0x38, 0x11, 0x25, 0x82, 0xba, 0x36, 0x11, 0x56, 0x4e, 0xf1, 0x6b, 0xad, 0xd8, 0xdc, 0x56, - 0xa2, 0xbf, 0xf5, 0x3a, 0xf1, 0x12, 0x97, 0x35, 0xba, 0x2d, 0x24, 0x75, 0x18, 0xba, 0x09, 0x35, - 0x7f, 0x31, 0x55, 0xa3, 0xdb, 0xa2, 0xf9, 0x71, 0x01, 0xcc, 0xdf, 0x30, 0x97, 0xa2, 0x55, 0xc8, - 0x79, 0xae, 0x32, 0xbb, 0xdc, 0x29, 0x4c, 0x8e, 0x1b, 0xb9, 0xee, 0x2e, 0xce, 0x79, 0x2e, 0xda, - 0x04, 0x73, 0x40, 0x05, 0xd1, 0x06, 0x59, 0x59, 0x0e, 0x49, 0xdf, 0xb5, 0x37, 0x0a, 0x8b, 0x7e, - 0x06, 0xa6, 0xbc, 0x2a, 0x6d, 0xc9, 0x5a, 0x16, 0x47, 0xea, 0x7c, 0x15, 0x52, 0x27, 0xe1, 0x49, - 0x3c, 0xda, 0x83, 0x8a, 0x4b, 0xb9, 0x13, 0x79, 0xa1, 0x90, 0x31, 0x34, 0x15, 0xfd, 0xf6, 0x59, - 0xf4, 0xdd, 0x19, 0x14, 0xa7, 0x79, 0xe8, 0x97, 0x50, 0xe0, 0x82, 0x88, 0x21, 0xb7, 0x16, 0x94, - 0x84, 0xfa, 0x99, 0x06, 0x28, 0x94, 0x36, 0x41, 0x73, 0xd0, 0x33, 0xa8, 0x0e, 0x48, 0x40, 0x7a, - 0x34, 0xb2, 0xb5, 0x94, 0x82, 0x92, 0xf2, 0x83, 0x4c, 0xd7, 0x63, 0x64, 
0x2c, 0x08, 0x2f, 0x0d, - 0xd2, 0x5b, 0xd4, 0x05, 0x20, 0x42, 0x10, 0xe7, 0xfd, 0x80, 0x06, 0xc2, 0x2a, 0x2a, 0x29, 0x3f, - 0xcc, 0xb4, 0x85, 0x8a, 0x0f, 0x2c, 0xea, 0xb7, 0xa7, 0xe0, 0x4e, 0xce, 0x32, 0x70, 0x8a, 0x8c, - 0x9e, 0x42, 0xc5, 0xa1, 0x91, 0xf0, 0xde, 0x79, 0x0e, 0x11, 0xd4, 0x2a, 0x29, 0x59, 0x8d, 0x2c, - 0x59, 0x3b, 0x33, 0x98, 0x76, 0x2c, 0xcd, 0x44, 0xf7, 0xc1, 0x8c, 0x98, 0x4f, 0xad, 0xf2, 0xba, - 0x71, 0xa7, 0x7a, 0xf6, 0xd5, 0x60, 0xe6, 0x53, 0xac, 0x90, 0x52, 0xf5, 0xcc, 0x10, 0x6e, 0xc1, - 0x7a, 0xfe, 0xd2, 0x6e, 0xe0, 0x34, 0x73, 0x7b, 0xf5, 0xf0, 0xa8, 0x89, 0x60, 0xb9, 0x64, 0x2c, - 0x1b, 0x2a, 0xcf, 0x8c, 0xfb, 0xc6, 0xef, 0x8c, 0xdf, 0x1b, 0xcd, 0xff, 0xe6, 0xa1, 0xf8, 0x8a, - 0x46, 0x23, 0xcf, 0xf9, 0xba, 0x59, 0xf8, 0xf0, 0x44, 0x16, 0x66, 0x06, 0x4b, 0xab, 0x9d, 0x4b, - 0xc4, 0x2d, 0x28, 0xd1, 0xc0, 0x0d, 0x99, 0x17, 0x08, 0x9d, 0x85, 0x99, 0x91, 0xda, 0xd3, 0x18, - 0x3c, 0x45, 0xa3, 0x3d, 0x58, 0x8a, 0x8b, 0xcb, 0x3e, 0x91, 0x82, 0xeb, 0x59, 0xf4, 0xdf, 0x2a, - 0xa0, 0xce, 0x9d, 0xc5, 0x61, 0x6a, 0x87, 0x76, 0x61, 0x29, 0x8c, 0xe8, 0xc8, 0x63, 0x43, 0x6e, - 0x2b, 0x27, 0x0a, 0x97, 0x72, 0x02, 0x2f, 0x26, 0x2c, 0xb9, 0x43, 0xbf, 0x82, 0x45, 0x49, 0xb6, - 0x93, 0xa6, 0x04, 0x17, 0x36, 0x25, 0x5c, 0x91, 0x04, 0xbd, 0x41, 0x2f, 0xe1, 0x3b, 0x27, 0xac, - 0x98, 0x0a, 0xaa, 0x5c, 0x2c, 0xe8, 0x66, 0xda, 0x12, 0xfd, 0x71, 0x1b, 0x1d, 0x1e, 0x35, 0xab, - 0xb0, 0x98, 0x4e, 0x81, 0xe6, 0x9f, 0x73, 0x50, 0x4a, 0x02, 0x89, 0x1e, 0xe8, 0x3b, 0x33, 0xce, - 0x8e, 0x5a, 0x82, 0x55, 0xfe, 0xc6, 0xd7, 0xf5, 0x00, 0x16, 0x42, 0x16, 0x09, 0x6e, 0xe5, 0x54, - 0x72, 0x66, 0xd6, 0xfb, 0x3e, 0x8b, 0xc4, 0x0e, 0x0b, 0xde, 0x79, 0x3d, 0x1c, 0x83, 0xd1, 0x5b, - 0xa8, 0x8c, 0xbc, 0x48, 0x0c, 0x89, 0x6f, 0x7b, 0x21, 0xb7, 0xf2, 0x8a, 0xfb, 0xa3, 0xf3, 0x54, - 0xb6, 0xde, 0xc4, 0xf8, 0xee, 0x7e, 0xa7, 0x3a, 0x39, 0x6e, 0xc0, 0x74, 0xcb, 0x31, 0x68, 0x51, - 0xdd, 0x90, 0xd7, 0x5e, 0x40, 0x79, 0x7a, 0x82, 0xee, 0x01, 0x04, 0x71, 0x5d, 0xd8, 0xd3, 0xcc, - 0x5e, 0x9a, 0x1c, 0x37, 0xca, 0xba, 0x5a, 0xba, 0xbb, 0xb8, 0xac, 0x01, 0x5d, 0x17, 0x21, 0x30, - 0x89, 0xeb, 0x46, 0x2a, 0xcf, 0xcb, 0x58, 0xad, 0x9b, 0x7f, 0x2a, 0x82, 0xf9, 0x9a, 0xf0, 0xfe, - 0x75, 0xb7, 0x68, 0xa9, 0x73, 0xae, 0x32, 0xee, 0x01, 0xf0, 0x38, 0xdf, 0xa4, 0x3b, 0xe6, 0xcc, - 0x1d, 0x9d, 0x85, 0xd2, 0x1d, 0x0d, 0x88, 0xdd, 0xe1, 0x3e, 0x13, 0xaa, 0x08, 0x4c, 0xac, 0xd6, - 0xe8, 0x36, 0x14, 0x03, 0xe6, 0x2a, 0x7a, 0x41, 0xd1, 0x61, 0x72, 0xdc, 0x28, 0xc8, 0xa6, 0xd3, - 0xdd, 0xc5, 0x05, 0x79, 0xd4, 0x75, 0x55, 0xd3, 0x09, 0x02, 0x26, 0x88, 0x6c, 0xe8, 0x5c, 0xf7, - 0xce, 0xcc, 0xec, 0x6f, 0xcf, 0x60, 0x49, 0xbf, 0x4b, 0x31, 0xd1, 0x1b, 0xb8, 0x99, 0xd8, 0x9b, - 0x16, 0x58, 0xba, 0x8a, 0x40, 0xa4, 0x25, 0xa4, 0x4e, 0x52, 0x6f, 0x4c, 0xf9, 0xec, 0x37, 0x46, - 0x45, 0x30, 0xeb, 0x8d, 0xe9, 0xc0, 0x92, 0x4b, 0xb9, 0x17, 0x51, 0x57, 0xb5, 0x09, 0xaa, 0x2a, - 0xb3, 0xba, 0xf9, 0xfd, 0xf3, 0x84, 0x50, 0xbc, 0xa8, 0x39, 0x6a, 0x87, 0xda, 0x50, 0xd2, 0x79, - 0xc3, 0xad, 0xca, 0x55, 0x9a, 0xf2, 0x94, 0x76, 0xa2, 0xcd, 0x2d, 0x5e, 0xa9, 0xcd, 0x3d, 0x04, - 0xf0, 0x59, 0xcf, 0x76, 0x23, 0x6f, 0x44, 0x23, 0x6b, 0x49, 0x4f, 0x1c, 0x19, 0xdc, 0x5d, 0x85, - 0xc0, 0x65, 0x9f, 0xf5, 0xe2, 0xe5, 0x5c, 0x53, 0xaa, 0x5e, 0xb1, 0x29, 0x11, 0xa8, 0x11, 0xce, - 0xbd, 0x5e, 0x40, 0x5d, 0xbb, 0x47, 0x03, 0x1a, 0x79, 0x8e, 0x1d, 0x51, 0xce, 0x86, 0x91, 0x43, - 0xb9, 0xf5, 0x2d, 0x15, 0x89, 0xcc, 0x99, 0xe1, 0x69, 0x0c, 0xc6, 0x1a, 0x8b, 0xad, 0x44, 0xcc, - 0xa9, 0x03, 0xbe, 0x5d, 0x3b, 0x3c, 0x6a, 0xae, 0xc2, 0x4a, 0xba, 0x4d, 0x6d, 0x19, 0x4f, 0x8c, - 
0x67, 0xc6, 0xbe, 0xd1, 0xfc, 0x7b, 0x0e, 0xbe, 0x3d, 0x17, 0x53, 0xf4, 0x53, 0x28, 0xea, 0xa8, - 0x9e, 0x37, 0xf9, 0x69, 0x1e, 0x4e, 0xb0, 0x68, 0x0d, 0xca, 0xb2, 0xc4, 0x29, 0xe7, 0x34, 0x6e, - 0x5e, 0x65, 0x3c, 0xfb, 0x80, 0x2c, 0x28, 0x12, 0xdf, 0x23, 0xf2, 0x2c, 0xaf, 0xce, 0x92, 0x2d, - 0x1a, 0xc2, 0x6a, 0x1c, 0x7a, 0x7b, 0xf6, 0xc0, 0xda, 0x2c, 0x14, 0xdc, 0x32, 0x95, 0xff, 0x8f, - 0x2f, 0x95, 0x09, 0xfa, 0x72, 0x66, 0x1f, 0x5e, 0x86, 0x82, 0xef, 0x05, 0x22, 0x1a, 0xe3, 0x15, - 0x37, 0xe3, 0xa8, 0xf6, 0x14, 0x6e, 0x9d, 0x49, 0x41, 0xcb, 0x90, 0xef, 0xd3, 0x71, 0xdc, 0x9e, - 0xb0, 0x5c, 0xa2, 0x15, 0x58, 0x18, 0x11, 0x7f, 0x48, 0x75, 0x37, 0x8b, 0x37, 0xdb, 0xb9, 0x2d, - 0xa3, 0xf9, 0x29, 0x07, 0x45, 0x6d, 0xce, 0x75, 0x3f, 0xf9, 0x5a, 0xed, 0x5c, 0x63, 0x7b, 0x04, - 0x8b, 0x3a, 0xa4, 0x71, 0x45, 0x9a, 0x17, 0xe6, 0x74, 0x25, 0xc6, 0xc7, 0xd5, 0xf8, 0x08, 0x4c, - 0x2f, 0x24, 0x03, 0xfd, 0xdc, 0x67, 0x6a, 0xee, 0xee, 0xb7, 0x5f, 0xbc, 0x0c, 0xe3, 0xc6, 0x52, - 0x9a, 0x1c, 0x37, 0x4c, 0xf9, 0x01, 0x2b, 0x5a, 0xe6, 0xc3, 0xf8, 0x97, 0x05, 0x28, 0xee, 0xf8, - 0x43, 0x2e, 0x68, 0x74, 0xdd, 0x41, 0xd2, 0x6a, 0xe7, 0x82, 0xb4, 0x03, 0xc5, 0x88, 0x31, 0x61, - 0x3b, 0xe4, 0xbc, 0xf8, 0x60, 0xc6, 0xc4, 0x4e, 0xbb, 0x53, 0x95, 0x44, 0xd9, 0xdb, 0xe3, 0x3d, - 0x2e, 0x48, 0xea, 0x0e, 0x41, 0x6f, 0x61, 0x35, 0x79, 0x11, 0x0f, 0x18, 0x13, 0x5c, 0x44, 0x24, - 0xb4, 0xfb, 0x74, 0x2c, 0x67, 0xa5, 0xfc, 0x59, 0x83, 0xf6, 0x5e, 0xe0, 0x44, 0x63, 0x15, 0xbc, - 0xe7, 0x74, 0x8c, 0x57, 0xb4, 0x80, 0x4e, 0xc2, 0x7f, 0x4e, 0xc7, 0x1c, 0x3d, 0x86, 0x35, 0x3a, - 0x85, 0x49, 0x89, 0xb6, 0x4f, 0x06, 0xf2, 0xad, 0xb7, 0x1d, 0x9f, 0x39, 0x7d, 0xf5, 0xdc, 0x98, - 0xf8, 0x16, 0x4d, 0x8b, 0xfa, 0x75, 0x8c, 0xd8, 0x91, 0x00, 0xc4, 0xc1, 0x3a, 0xf0, 0x89, 0xd3, - 0xf7, 0x3d, 0x2e, 0xff, 0x97, 0x4a, 0xcd, 0xcd, 0xf2, 0xc5, 0x90, 0xb6, 0x6d, 0x9d, 0x13, 0xad, - 0x56, 0x67, 0xc6, 0x4d, 0x4d, 0xe1, 0xba, 0xa2, 0xbe, 0x7b, 0x90, 0x7d, 0x8a, 0x3a, 0x50, 0x19, - 0x06, 0x52, 0x7d, 0x1c, 0x83, 0xf2, 0x65, 0x63, 0x00, 0x31, 0x4b, 0x7a, 0x5e, 0x1b, 0xc1, 0xda, - 0x79, 0xca, 0x33, 0x6a, 0xf3, 0x49, 0xba, 0x36, 0x2b, 0x9b, 0x77, 0xb3, 0xf4, 0x65, 0x8b, 0x4c, - 0xd5, 0x71, 0x66, 0xda, 0xfe, 0xcd, 0x80, 0xc2, 0x2b, 0xea, 0x44, 0x54, 0x7c, 0xd5, 0xac, 0xdd, - 0x3a, 0x91, 0xb5, 0xf5, 0xec, 0x41, 0x58, 0x6a, 0x9d, 0x4b, 0xda, 0x1a, 0x94, 0xbc, 0x40, 0xd0, - 0x28, 0x20, 0xbe, 0xca, 0xda, 0x12, 0x9e, 0xee, 0x33, 0x1d, 0xf8, 0x64, 0x40, 0x21, 0x9e, 0x14, - 0xaf, 0xdb, 0x81, 0x58, 0xeb, 0x69, 0x07, 0x32, 0x8d, 0xfc, 0x8f, 0x01, 0xa5, 0xe4, 0xc1, 0xfa, - 0xaa, 0x66, 0x9e, 0x9a, 0xbc, 0xf2, 0xff, 0xf7, 0xe4, 0x85, 0xc0, 0xec, 0x7b, 0x81, 0x9e, 0x11, - 0xb1, 0x5a, 0xa3, 0x16, 0x14, 0x43, 0x32, 0xf6, 0x19, 0x71, 0x75, 0xa3, 0x5c, 0x99, 0xfb, 0x95, - 0xa2, 0x1d, 0x8c, 0x71, 0x02, 0xda, 0x5e, 0x39, 0x3c, 0x6a, 0x2e, 0x43, 0x35, 0xed, 0xf9, 0x7b, - 0xa3, 0xf9, 0x4f, 0x03, 0xca, 0x7b, 0x7f, 0x14, 0x34, 0x50, 0xf3, 0xc0, 0x37, 0xd2, 0xf9, 0xf5, - 0xf9, 0x5f, 0x32, 0xca, 0x27, 0x7e, 0xa4, 0xc8, 0xba, 0xd4, 0x8e, 0xf5, 0xf9, 0x4b, 0xfd, 0xc6, - 0xbf, 0xbe, 0xd4, 0x6f, 0x7c, 0x9c, 0xd4, 0x8d, 0xcf, 0x93, 0xba, 0xf1, 0x8f, 0x49, 0xdd, 0xf8, - 0xf7, 0xa4, 0x6e, 0x1c, 0x14, 0x54, 0x7c, 0x7e, 0xf2, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf9, - 0xa1, 0x26, 0x54, 0x90, 0x13, 0x00, 0x00, + // 1544 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4d, 0x73, 0xdb, 0x4c, + 0x1d, 0xaf, 0x6c, 0xc5, 0x2f, 0x7f, 0x27, 0x26, 0xec, 0x13, 0x82, 0x6a, 0x82, 0x1d, 0xfc, 0x0c, + 0xcc, 0x33, 0xcf, 0x74, 0x9c, 
0x12, 0x0a, 0xa4, 0x81, 0xd2, 0xda, 0x49, 0x68, 0x3d, 0xa5, 0x34, + 0xb3, 0x29, 0x2d, 0x37, 0xb1, 0x91, 0x36, 0xae, 0xb0, 0xac, 0xd5, 0x68, 0xd7, 0x2e, 0xbe, 0xf5, + 0x1c, 0x3e, 0x40, 0x6e, 0x1c, 0xfa, 0x2d, 0xb8, 0x70, 0xe0, 0xd4, 0x23, 0xc3, 0x81, 0xe1, 0x94, + 0xa1, 0xfe, 0x16, 0xcc, 0x70, 0x60, 0x76, 0xb5, 0xb2, 0x95, 0x58, 0x79, 0x63, 0x3a, 0x19, 0x4e, + 0xd1, 0x6a, 0x7f, 0xbf, 0xff, 0x9b, 0xfe, 0x6f, 0x31, 0xdc, 0xeb, 0x79, 0xe2, 0xed, 0xf0, 0xb0, + 0xe5, 0xb0, 0xc1, 0x86, 0xcb, 0x9c, 0x3e, 0x8d, 0x36, 0xf8, 0x3b, 0x12, 0x0d, 0xfa, 0x9e, 0xd8, + 0x20, 0xa1, 0xb7, 0xc1, 0x0e, 0x7f, 0x4f, 0x1d, 0xc1, 0x5b, 0x61, 0xc4, 0x04, 0x43, 0x28, 0x86, + 0xb4, 0x12, 0x48, 0x6b, 0xf4, 0xc3, 0xda, 0xd7, 0x57, 0x48, 0x10, 0xe3, 0x90, 0x6a, 0xfe, 0x95, + 0x58, 0x1e, 0x52, 0x27, 0xc1, 0x36, 0x7a, 0x8c, 0xf5, 0x7c, 0xba, 0xa1, 0x4e, 0x87, 0xc3, 0xa3, + 0x0d, 0xe1, 0x0d, 0x28, 0x17, 0x64, 0x10, 0x6a, 0xc0, 0x4a, 0x8f, 0xf5, 0x98, 0x7a, 0xdc, 0x90, + 0x4f, 0xfa, 0xed, 0xdd, 0xf3, 0x34, 0x12, 0x8c, 0xf5, 0xd5, 0x4f, 0x2f, 0xd1, 0x3e, 0x85, 0x87, + 0xfe, 0xb0, 0xe7, 0x05, 0xfa, 0x4f, 0x4c, 0x6c, 0xfe, 0xd9, 0x00, 0xf3, 0x05, 0x15, 0x04, 0xfd, + 0x0c, 0x8a, 0x23, 0x1a, 0x71, 0x8f, 0x05, 0x96, 0xb1, 0x6e, 0x7c, 0x55, 0xd9, 0xfc, 0x4e, 0x6b, + 0x3e, 0x22, 0xad, 0xd7, 0x31, 0xa4, 0x63, 0x7e, 0x3c, 0x6d, 0xdc, 0xc1, 0x09, 0x03, 0x3d, 0x04, + 0x70, 0x22, 0x4a, 0x04, 0x75, 0x6d, 0x22, 0xac, 0x9c, 0xe2, 0xd7, 0x5a, 0xb1, 0xb9, 0xad, 0x44, + 0x7f, 0xeb, 0x55, 0xe2, 0x25, 0x2e, 0x6b, 0x74, 0x5b, 0x48, 0xea, 0x30, 0x74, 0x13, 0x6a, 0xfe, + 0x6a, 0xaa, 0x46, 0xb7, 0x45, 0xf3, 0xfd, 0x02, 0x98, 0xbf, 0x66, 0x2e, 0x45, 0xab, 0x90, 0xf3, + 0x5c, 0x65, 0x76, 0xb9, 0x53, 0x98, 0x9c, 0x36, 0x72, 0xdd, 0x5d, 0x9c, 0xf3, 0x5c, 0xb4, 0x09, + 0xe6, 0x80, 0x0a, 0xa2, 0x0d, 0xb2, 0xb2, 0x1c, 0x92, 0xbe, 0x6b, 0x6f, 0x14, 0x16, 0xfd, 0x04, + 0x4c, 0xf9, 0xa9, 0xb4, 0x25, 0x6b, 0x59, 0x1c, 0xa9, 0xf3, 0x20, 0xa4, 0x4e, 0xc2, 0x93, 0x78, + 0xb4, 0x07, 0x15, 0x97, 0x72, 0x27, 0xf2, 0x42, 0x21, 0x63, 0x68, 0x2a, 0xfa, 0x97, 0x17, 0xd1, + 0x77, 0x67, 0x50, 0x9c, 0xe6, 0xa1, 0x9f, 0x43, 0x81, 0x0b, 0x22, 0x86, 0xdc, 0x5a, 0x50, 0x12, + 0xea, 0x17, 0x1a, 0xa0, 0x50, 0xda, 0x04, 0xcd, 0x41, 0xcf, 0xa0, 0x3a, 0x20, 0x01, 0xe9, 0xd1, + 0xc8, 0xd6, 0x52, 0x0a, 0x4a, 0xca, 0xf7, 0x32, 0x5d, 0x8f, 0x91, 0xb1, 0x20, 0xbc, 0x34, 0x48, + 0x1f, 0x51, 0x17, 0x80, 0x08, 0x41, 0x9c, 0xb7, 0x03, 0x1a, 0x08, 0xab, 0xa8, 0xa4, 0x7c, 0x3f, + 0xd3, 0x16, 0x2a, 0xde, 0xb1, 0xa8, 0xdf, 0x9e, 0x82, 0x3b, 0x39, 0xcb, 0xc0, 0x29, 0x32, 0x7a, + 0x0a, 0x15, 0x87, 0x46, 0xc2, 0x3b, 0xf2, 0x1c, 0x22, 0xa8, 0x55, 0x52, 0xb2, 0x1a, 0x59, 0xb2, + 0x76, 0x66, 0x30, 0xed, 0x58, 0x9a, 0x89, 0xee, 0x83, 0x19, 0x31, 0x9f, 0x5a, 0xe5, 0x75, 0xe3, + 0xab, 0xea, 0xc5, 0x9f, 0x06, 0x33, 0x9f, 0x62, 0x85, 0x94, 0xaa, 0x67, 0x86, 0x70, 0x0b, 0xd6, + 0xf3, 0xd7, 0x76, 0x03, 0xa7, 0x99, 0xdb, 0xab, 0xc7, 0x27, 0x4d, 0x04, 0xcb, 0x25, 0x63, 0xd9, + 0x50, 0x79, 0x66, 0xdc, 0x37, 0x7e, 0x6b, 0xfc, 0xce, 0x68, 0xfe, 0x27, 0x0f, 0xc5, 0x03, 0x1a, + 0x8d, 0x3c, 0xe7, 0xf3, 0x66, 0xe1, 0xc3, 0x33, 0x59, 0x98, 0x19, 0x2c, 0xad, 0x76, 0x2e, 0x11, + 0xb7, 0xa0, 0x44, 0x03, 0x37, 0x64, 0x5e, 0x20, 0x74, 0x16, 0x66, 0x46, 0x6a, 0x4f, 0x63, 0xf0, + 0x14, 0x8d, 0xf6, 0x60, 0x29, 0x2e, 0x2e, 0xfb, 0x4c, 0x0a, 0xae, 0x67, 0xd1, 0x7f, 0xa3, 0x80, + 0x3a, 0x77, 0x16, 0x87, 0xa9, 0x13, 0xda, 0x85, 0xa5, 0x30, 0xa2, 0x23, 0x8f, 0x0d, 0xb9, 0xad, + 0x9c, 0x28, 0x5c, 0xcb, 0x09, 0xbc, 0x98, 0xb0, 0xe4, 0x09, 0xfd, 0x02, 0x16, 0x25, 0xd9, 0x4e, + 0x9a, 0x12, 0x5c, 0xd9, 0x94, 0x70, 0x45, 0x12, 0xf4, 
0x01, 0xbd, 0x84, 0x6f, 0x9d, 0xb1, 0x62, + 0x2a, 0xa8, 0x72, 0xb5, 0xa0, 0x2f, 0xd2, 0x96, 0xe8, 0x97, 0xdb, 0xe8, 0xf8, 0xa4, 0x59, 0x85, + 0xc5, 0x74, 0x0a, 0x34, 0xff, 0x94, 0x83, 0x52, 0x12, 0x48, 0xf4, 0x40, 0x7f, 0x33, 0xe3, 0xe2, + 0xa8, 0x25, 0x58, 0xe5, 0x6f, 0xfc, 0xb9, 0x1e, 0xc0, 0x42, 0xc8, 0x22, 0xc1, 0xad, 0x9c, 0x4a, + 0xce, 0xcc, 0x7a, 0xdf, 0x67, 0x91, 0xd8, 0x61, 0xc1, 0x91, 0xd7, 0xc3, 0x31, 0x18, 0xbd, 0x81, + 0xca, 0xc8, 0x8b, 0xc4, 0x90, 0xf8, 0xb6, 0x17, 0x72, 0x2b, 0xaf, 0xb8, 0x3f, 0xb8, 0x4c, 0x65, + 0xeb, 0x75, 0x8c, 0xef, 0xee, 0x77, 0xaa, 0x93, 0xd3, 0x06, 0x4c, 0x8f, 0x1c, 0x83, 0x16, 0xd5, + 0x0d, 0x79, 0xed, 0x05, 0x94, 0xa7, 0x37, 0xe8, 0x1e, 0x40, 0x10, 0xd7, 0x85, 0x3d, 0xcd, 0xec, + 0xa5, 0xc9, 0x69, 0xa3, 0xac, 0xab, 0xa5, 0xbb, 0x8b, 0xcb, 0x1a, 0xd0, 0x75, 0x11, 0x02, 0x93, + 0xb8, 0x6e, 0xa4, 0xf2, 0xbc, 0x8c, 0xd5, 0x73, 0xf3, 0x8f, 0x45, 0x30, 0x5f, 0x11, 0xde, 0xbf, + 0xed, 0x16, 0x2d, 0x75, 0xce, 0x55, 0xc6, 0x3d, 0x00, 0x1e, 0xe7, 0x9b, 0x74, 0xc7, 0x9c, 0xb9, + 0xa3, 0xb3, 0x50, 0xba, 0xa3, 0x01, 0xb1, 0x3b, 0xdc, 0x67, 0x42, 0x15, 0x81, 0x89, 0xd5, 0x33, + 0xfa, 0x12, 0x8a, 0x01, 0x73, 0x15, 0xbd, 0xa0, 0xe8, 0x30, 0x39, 0x6d, 0x14, 0x64, 0xd3, 0xe9, + 0xee, 0xe2, 0x82, 0xbc, 0xea, 0xba, 0xaa, 0xe9, 0x04, 0x01, 0x13, 0x44, 0x36, 0x74, 0xae, 0x7b, + 0x67, 0x66, 0xf6, 0xb7, 0x67, 0xb0, 0xa4, 0xdf, 0xa5, 0x98, 0xe8, 0x35, 0x7c, 0x91, 0xd8, 0x9b, + 0x16, 0x58, 0xba, 0x89, 0x40, 0xa4, 0x25, 0xa4, 0x6e, 0x52, 0x33, 0xa6, 0x7c, 0xf1, 0x8c, 0x51, + 0x11, 0xcc, 0x9a, 0x31, 0x1d, 0x58, 0x72, 0x29, 0xf7, 0x22, 0xea, 0xaa, 0x36, 0x41, 0x55, 0x65, + 0x56, 0x37, 0xbf, 0x7b, 0x99, 0x10, 0x8a, 0x17, 0x35, 0x47, 0x9d, 0x50, 0x1b, 0x4a, 0x3a, 0x6f, + 0xb8, 0x55, 0xb9, 0x49, 0x53, 0x9e, 0xd2, 0xce, 0xb4, 0xb9, 0xc5, 0x1b, 0xb5, 0xb9, 0x87, 0x00, + 0x3e, 0xeb, 0xd9, 0x6e, 0xe4, 0x8d, 0x68, 0x64, 0x2d, 0xe9, 0x8d, 0x23, 0x83, 0xbb, 0xab, 0x10, + 0xb8, 0xec, 0xb3, 0x5e, 0xfc, 0x38, 0xd7, 0x94, 0xaa, 0x37, 0x6c, 0x4a, 0x04, 0x6a, 0x84, 0x73, + 0xaf, 0x17, 0x50, 0xd7, 0xee, 0xd1, 0x80, 0x46, 0x9e, 0x63, 0x47, 0x94, 0xb3, 0x61, 0xe4, 0x50, + 0x6e, 0x7d, 0x43, 0x45, 0x22, 0x73, 0x67, 0x78, 0x1a, 0x83, 0xb1, 0xc6, 0x62, 0x2b, 0x11, 0x73, + 0xee, 0x82, 0x6f, 0xd7, 0x8e, 0x4f, 0x9a, 0xab, 0xb0, 0x92, 0x6e, 0x53, 0x5b, 0xc6, 0x13, 0xe3, + 0x99, 0xb1, 0x6f, 0x34, 0xff, 0x9a, 0x83, 0x6f, 0xce, 0xc5, 0x14, 0xfd, 0x18, 0x8a, 0x3a, 0xaa, + 0x97, 0x6d, 0x7e, 0x9a, 0x87, 0x13, 0x2c, 0x5a, 0x83, 0xb2, 0x2c, 0x71, 0xca, 0x39, 0x8d, 0x9b, + 0x57, 0x19, 0xcf, 0x5e, 0x20, 0x0b, 0x8a, 0xc4, 0xf7, 0x88, 0xbc, 0xcb, 0xab, 0xbb, 0xe4, 0x88, + 0x86, 0xb0, 0x1a, 0x87, 0xde, 0x9e, 0x0d, 0x58, 0x9b, 0x85, 0x82, 0x5b, 0xa6, 0xf2, 0xff, 0xf1, + 0xb5, 0x32, 0x41, 0x7f, 0x9c, 0xd9, 0x8b, 0x97, 0xa1, 0xe0, 0x7b, 0x81, 0x88, 0xc6, 0x78, 0xc5, + 0xcd, 0xb8, 0xaa, 0x3d, 0x85, 0xbb, 0x17, 0x52, 0xd0, 0x32, 0xe4, 0xfb, 0x74, 0x1c, 0xb7, 0x27, + 0x2c, 0x1f, 0xd1, 0x0a, 0x2c, 0x8c, 0x88, 0x3f, 0xa4, 0xba, 0x9b, 0xc5, 0x87, 0xed, 0xdc, 0x96, + 0xd1, 0xfc, 0x90, 0x83, 0xa2, 0x36, 0xe7, 0xb6, 0x47, 0xbe, 0x56, 0x3b, 0xd7, 0xd8, 0x1e, 0xc1, + 0xa2, 0x0e, 0x69, 0x5c, 0x91, 0xe6, 0x95, 0x39, 0x5d, 0x89, 0xf1, 0x71, 0x35, 0x3e, 0x02, 0xd3, + 0x0b, 0xc9, 0x40, 0x8f, 0xfb, 0x4c, 0xcd, 0xdd, 0xfd, 0xf6, 0x8b, 0x97, 0x61, 0xdc, 0x58, 0x4a, + 0x93, 0xd3, 0x86, 0x29, 0x5f, 0x60, 0x45, 0xcb, 0x1c, 0x8c, 0x7f, 0x5f, 0x80, 0xe2, 0x8e, 0x3f, + 0xe4, 0x82, 0x46, 0xb7, 0x1d, 0x24, 0xad, 0x76, 0x2e, 0x48, 0x3b, 0x50, 0x8c, 0x18, 0x13, 0xb6, + 0x43, 0x2e, 0x8b, 0x0f, 0x66, 0x4c, 0xec, 0xb4, 0x3b, 0x55, 0x49, 0x94, 0xbd, 
0x3d, 0x3e, 0xe3, + 0x82, 0xa4, 0xee, 0x10, 0xf4, 0x06, 0x56, 0x93, 0x89, 0x78, 0xc8, 0x98, 0xe0, 0x22, 0x22, 0xa1, + 0xdd, 0xa7, 0x63, 0xb9, 0x2b, 0xe5, 0x2f, 0x5a, 0xb4, 0xf7, 0x02, 0x27, 0x1a, 0xab, 0xe0, 0x3d, + 0xa7, 0x63, 0xbc, 0xa2, 0x05, 0x74, 0x12, 0xfe, 0x73, 0x3a, 0xe6, 0xe8, 0x31, 0xac, 0xd1, 0x29, + 0x4c, 0x4a, 0xb4, 0x7d, 0x32, 0x90, 0xb3, 0xde, 0x76, 0x7c, 0xe6, 0xf4, 0xd5, 0xb8, 0x31, 0xf1, + 0x5d, 0x9a, 0x16, 0xf5, 0xab, 0x18, 0xb1, 0x23, 0x01, 0x88, 0x83, 0x75, 0xe8, 0x13, 0xa7, 0xef, + 0x7b, 0x5c, 0xfe, 0x2f, 0x95, 0xda, 0x9b, 0xe5, 0xc4, 0x90, 0xb6, 0x6d, 0x5d, 0x12, 0xad, 0x56, + 0x67, 0xc6, 0x4d, 0x6d, 0xe1, 0xba, 0xa2, 0xbe, 0x7d, 0x98, 0x7d, 0x8b, 0x3a, 0x50, 0x19, 0x06, + 0x52, 0x7d, 0x1c, 0x83, 0xf2, 0x75, 0x63, 0x00, 0x31, 0x4b, 0x79, 0xbe, 0x06, 0xe6, 0x91, 0xdc, + 0x61, 0xe4, 0x18, 0x29, 0xc5, 0xc9, 0xf5, 0xcb, 0xee, 0xfe, 0x01, 0x56, 0x6f, 0x6b, 0x23, 0x58, + 0xbb, 0xcc, 0xb4, 0x8c, 0xca, 0x7d, 0x92, 0xae, 0xdc, 0xca, 0xe6, 0xd7, 0x59, 0xd6, 0x64, 0x8b, + 0x4c, 0x55, 0x79, 0x66, 0x52, 0xff, 0xc5, 0x80, 0xc2, 0x01, 0x75, 0x22, 0x2a, 0x3e, 0x6b, 0x4e, + 0x6f, 0x9d, 0xc9, 0xe9, 0x7a, 0xf6, 0x9a, 0x2c, 0xb5, 0xce, 0xa5, 0x74, 0x0d, 0x4a, 0x5e, 0x20, + 0x68, 0x14, 0x10, 0x5f, 0xe5, 0x74, 0x09, 0x4f, 0xcf, 0x99, 0x0e, 0x7c, 0x30, 0xa0, 0x10, 0xef, + 0x91, 0xb7, 0xed, 0x40, 0xac, 0xf5, 0xbc, 0x03, 0x99, 0x46, 0xfe, 0xdb, 0x80, 0x52, 0x32, 0xce, + 0x3e, 0xab, 0x99, 0xe7, 0xf6, 0xb2, 0xfc, 0xff, 0xbc, 0x97, 0x21, 0x30, 0xfb, 0x5e, 0xa0, 0x37, + 0x48, 0xac, 0x9e, 0x51, 0x0b, 0x8a, 0x21, 0x19, 0xfb, 0x8c, 0xb8, 0xba, 0x8d, 0xae, 0xcc, 0xfd, + 0x86, 0xd1, 0x0e, 0xc6, 0x38, 0x01, 0x6d, 0xaf, 0x1c, 0x9f, 0x34, 0x97, 0xa1, 0x9a, 0xf6, 0xfc, + 0xad, 0xd1, 0xfc, 0x87, 0x01, 0xe5, 0xbd, 0x3f, 0x08, 0x1a, 0xa8, 0x6d, 0xe1, 0xff, 0xd2, 0xf9, + 0xf5, 0xf9, 0xdf, 0x39, 0xca, 0x67, 0x7e, 0xc2, 0xc8, 0xfa, 0xa8, 0x1d, 0xeb, 0xe3, 0xa7, 0xfa, + 0x9d, 0x7f, 0x7e, 0xaa, 0xdf, 0x79, 0x3f, 0xa9, 0x1b, 0x1f, 0x27, 0x75, 0xe3, 0x6f, 0x93, 0xba, + 0xf1, 0xaf, 0x49, 0xdd, 0x38, 0x2c, 0xa8, 0xf8, 0xfc, 0xe8, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x07, 0xc5, 0x5a, 0x5b, 0xae, 0x13, 0x00, 0x00, } diff --git a/vendor/github.com/docker/swarmkit/api/raft.pb.go b/vendor/github.com/docker/swarmkit/api/raft.pb.go index ddee3e18a..058b29450 100644 --- a/vendor/github.com/docker/swarmkit/api/raft.pb.go +++ b/vendor/github.com/docker/swarmkit/api/raft.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/docker/swarmkit/api/raft.proto -// DO NOT EDIT! 
package api @@ -12,18 +11,16 @@ import raftpb "github.com/coreos/etcd/raft/raftpb" // skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" // skipping weak import docker_protobuf_plugin "github.com/docker/swarmkit/protobuf/plugin" -import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" +import deepcopy "github.com/docker/swarmkit/api/deepcopy" -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" import raftselector "github.com/docker/swarmkit/manager/raftselector" import codes "google.golang.org/grpc/codes" import status "google.golang.org/grpc/status" import metadata "google.golang.org/grpc/metadata" -import transport "google.golang.org/grpc/transport" +import peer "google.golang.org/grpc/peer" import rafttime "time" import strings "strings" @@ -616,7 +613,7 @@ func (m *RaftMember) CopyFrom(src interface{}) { o := src.(*RaftMember) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Status, &o.Status) + deepcopy.Copy(&m.Status, &o.Status) } func (m *JoinRequest) Copy() *JoinRequest { @@ -651,7 +648,7 @@ func (m *JoinResponse) CopyFrom(src interface{}) { m.Members = make([]*RaftMember, len(o.Members)) for i := range m.Members { m.Members[i] = &RaftMember{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Members[i], o.Members[i]) + deepcopy.Copy(m.Members[i], o.Members[i]) } } @@ -677,7 +674,7 @@ func (m *LeaveRequest) CopyFrom(src interface{}) { *m = *o if o.Node != nil { m.Node = &RaftMember{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node) + deepcopy.Copy(m.Node, o.Node) } } @@ -757,7 +754,7 @@ func (m *InternalRaftRequest) CopyFrom(src interface{}) { if o.Action != nil { m.Action = make([]StoreAction, len(o.Action)) for i := range m.Action { - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Action[i], &o.Action[i]) + deepcopy.Copy(&m.Action[i], &o.Action[i]) } } @@ -782,55 +779,55 @@ func (m *StoreAction) CopyFrom(src interface{}) { v := StoreAction_Node{ Node: &Node{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Node, o.GetNode()) + deepcopy.Copy(v.Node, o.GetNode()) m.Target = &v case *StoreAction_Service: v := StoreAction_Service{ Service: &Service{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Service, o.GetService()) + deepcopy.Copy(v.Service, o.GetService()) m.Target = &v case *StoreAction_Task: v := StoreAction_Task{ Task: &Task{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Task, o.GetTask()) + deepcopy.Copy(v.Task, o.GetTask()) m.Target = &v case *StoreAction_Network: v := StoreAction_Network{ Network: &Network{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Network, o.GetNetwork()) + deepcopy.Copy(v.Network, o.GetNetwork()) m.Target = &v case *StoreAction_Cluster: v := StoreAction_Cluster{ Cluster: &Cluster{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Cluster, o.GetCluster()) + deepcopy.Copy(v.Cluster, o.GetCluster()) m.Target = &v case *StoreAction_Secret: v := StoreAction_Secret{ Secret: &Secret{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Secret, o.GetSecret()) + deepcopy.Copy(v.Secret, o.GetSecret()) m.Target = &v case *StoreAction_Resource: v := StoreAction_Resource{ Resource: &Resource{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Resource, o.GetResource()) + deepcopy.Copy(v.Resource, o.GetResource()) m.Target = &v case *StoreAction_Extension: v := StoreAction_Extension{ Extension: &Extension{}, } - 
github_com_docker_swarmkit_api_deepcopy.Copy(v.Extension, o.GetExtension()) + deepcopy.Copy(v.Extension, o.GetExtension()) m.Target = &v case *StoreAction_Config: v := StoreAction_Config{ Config: &Config{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig()) + deepcopy.Copy(v.Config, o.GetConfig()) m.Target = &v } } @@ -1612,24 +1609,6 @@ func (m *StoreAction_Config) MarshalTo(dAtA []byte) (int, error) { } return i, nil } -func encodeFixed64Raft(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Raft(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1648,12 +1627,12 @@ type raftProxyRaftServer struct { func NewRaftProxyRaftServer(local RaftServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) RaftServer { redirectChecker := func(ctx context.Context) (context.Context, error) { - s, ok := transport.StreamFromContext(ctx) + p, ok := peer.FromContext(ctx) if !ok { return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") } - addr := s.ServerTransport().RemoteAddr().String() - md, ok := metadata.FromContext(ctx) + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } @@ -1661,7 +1640,7 @@ func NewRaftProxyRaftServer(local RaftServer, connSelector raftselector.ConnProv md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) - return metadata.NewContext(ctx, md), nil + return metadata.NewOutgoingContext(ctx, md), nil } remoteMods := []func(context.Context) (context.Context, error){redirectChecker} remoteMods = append(remoteMods, remoteCtxMod) @@ -1847,12 +1826,12 @@ type raftProxyRaftMembershipServer struct { func NewRaftProxyRaftMembershipServer(local RaftMembershipServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) RaftMembershipServer { redirectChecker := func(ctx context.Context) (context.Context, error) { - s, ok := transport.StreamFromContext(ctx) + p, ok := peer.FromContext(ctx) if !ok { return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") } - addr := s.ServerTransport().RemoteAddr().String() - md, ok := metadata.FromContext(ctx) + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } @@ -1860,7 +1839,7 @@ func NewRaftProxyRaftMembershipServer(local RaftMembershipServer, connSelector r md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) - return metadata.NewContext(ctx, md), nil + return metadata.NewOutgoingContext(ctx, md), nil } remoteMods := []func(context.Context) (context.Context, error){redirectChecker} remoteMods = 
append(remoteMods, remoteCtxMod) diff --git a/vendor/github.com/docker/swarmkit/api/resource.pb.go b/vendor/github.com/docker/swarmkit/api/resource.pb.go index d4e27f336..2d4741993 100644 --- a/vendor/github.com/docker/swarmkit/api/resource.pb.go +++ b/vendor/github.com/docker/swarmkit/api/resource.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/docker/swarmkit/api/resource.proto -// DO NOT EDIT! package api @@ -10,18 +9,16 @@ import math "math" import _ "github.com/gogo/protobuf/gogoproto" import _ "github.com/docker/swarmkit/protobuf/plugin" -import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" +import deepcopy "github.com/docker/swarmkit/api/deepcopy" -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" import raftselector "github.com/docker/swarmkit/manager/raftselector" import codes "google.golang.org/grpc/codes" import status "google.golang.org/grpc/status" import metadata "google.golang.org/grpc/metadata" -import transport "google.golang.org/grpc/transport" +import peer "google.golang.org/grpc/peer" import rafttime "time" import strings "strings" @@ -116,7 +113,7 @@ func (m *AttachNetworkRequest) CopyFrom(src interface{}) { *m = *o if o.Config != nil { m.Config = &NetworkAttachmentConfig{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config) + deepcopy.Copy(m.Config, o.Config) } } @@ -366,24 +363,6 @@ func (m *DetachNetworkResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Resource(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Resource(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintResource(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -402,12 +381,12 @@ type raftProxyResourceAllocatorServer struct { func NewRaftProxyResourceAllocatorServer(local ResourceAllocatorServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) ResourceAllocatorServer { redirectChecker := func(ctx context.Context) (context.Context, error) { - s, ok := transport.StreamFromContext(ctx) + p, ok := peer.FromContext(ctx) if !ok { return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") } - addr := s.ServerTransport().RemoteAddr().String() - md, ok := metadata.FromContext(ctx) + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } @@ -415,7 +394,7 @@ func NewRaftProxyResourceAllocatorServer(local ResourceAllocatorServer, connSele md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) - return metadata.NewContext(ctx, md), nil + return metadata.NewOutgoingContext(ctx, md), nil } remoteMods := []func(context.Context) (context.Context, 
error){redirectChecker} remoteMods = append(remoteMods, remoteCtxMod) diff --git a/vendor/github.com/docker/swarmkit/api/snapshot.pb.go b/vendor/github.com/docker/swarmkit/api/snapshot.pb.go index f8c32b772..4d6893a90 100644 --- a/vendor/github.com/docker/swarmkit/api/snapshot.pb.go +++ b/vendor/github.com/docker/swarmkit/api/snapshot.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/docker/swarmkit/api/snapshot.proto -// DO NOT EDIT! package api @@ -10,7 +9,7 @@ import math "math" // skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" -import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" +import deepcopy "github.com/docker/swarmkit/api/deepcopy" import strings "strings" import reflect "reflect" @@ -102,7 +101,7 @@ func (m *StoreSnapshot) CopyFrom(src interface{}) { m.Nodes = make([]*Node, len(o.Nodes)) for i := range m.Nodes { m.Nodes[i] = &Node{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Nodes[i], o.Nodes[i]) + deepcopy.Copy(m.Nodes[i], o.Nodes[i]) } } @@ -110,7 +109,7 @@ func (m *StoreSnapshot) CopyFrom(src interface{}) { m.Services = make([]*Service, len(o.Services)) for i := range m.Services { m.Services[i] = &Service{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Services[i], o.Services[i]) + deepcopy.Copy(m.Services[i], o.Services[i]) } } @@ -118,7 +117,7 @@ func (m *StoreSnapshot) CopyFrom(src interface{}) { m.Networks = make([]*Network, len(o.Networks)) for i := range m.Networks { m.Networks[i] = &Network{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i]) + deepcopy.Copy(m.Networks[i], o.Networks[i]) } } @@ -126,7 +125,7 @@ func (m *StoreSnapshot) CopyFrom(src interface{}) { m.Tasks = make([]*Task, len(o.Tasks)) for i := range m.Tasks { m.Tasks[i] = &Task{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + deepcopy.Copy(m.Tasks[i], o.Tasks[i]) } } @@ -134,7 +133,7 @@ func (m *StoreSnapshot) CopyFrom(src interface{}) { m.Clusters = make([]*Cluster, len(o.Clusters)) for i := range m.Clusters { m.Clusters[i] = &Cluster{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Clusters[i], o.Clusters[i]) + deepcopy.Copy(m.Clusters[i], o.Clusters[i]) } } @@ -142,7 +141,7 @@ func (m *StoreSnapshot) CopyFrom(src interface{}) { m.Secrets = make([]*Secret, len(o.Secrets)) for i := range m.Secrets { m.Secrets[i] = &Secret{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + deepcopy.Copy(m.Secrets[i], o.Secrets[i]) } } @@ -150,7 +149,7 @@ func (m *StoreSnapshot) CopyFrom(src interface{}) { m.Resources = make([]*Resource, len(o.Resources)) for i := range m.Resources { m.Resources[i] = &Resource{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Resources[i], o.Resources[i]) + deepcopy.Copy(m.Resources[i], o.Resources[i]) } } @@ -158,7 +157,7 @@ func (m *StoreSnapshot) CopyFrom(src interface{}) { m.Extensions = make([]*Extension, len(o.Extensions)) for i := range m.Extensions { m.Extensions[i] = &Extension{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Extensions[i], o.Extensions[i]) + deepcopy.Copy(m.Extensions[i], o.Extensions[i]) } } @@ -166,7 +165,7 @@ func (m *StoreSnapshot) CopyFrom(src interface{}) { m.Configs = make([]*Config, len(o.Configs)) for i := range m.Configs { m.Configs[i] = &Config{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Configs[i], o.Configs[i]) + deepcopy.Copy(m.Configs[i], o.Configs[i]) } } @@ -189,7 +188,7 @@ func (m *ClusterSnapshot) 
CopyFrom(src interface{}) { m.Members = make([]*RaftMember, len(o.Members)) for i := range m.Members { m.Members[i] = &RaftMember{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Members[i], o.Members[i]) + deepcopy.Copy(m.Members[i], o.Members[i]) } } @@ -213,8 +212,8 @@ func (m *Snapshot) CopyFrom(src interface{}) { o := src.(*Snapshot) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Membership, &o.Membership) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Store, &o.Store) + deepcopy.Copy(&m.Membership, &o.Membership) + deepcopy.Copy(&m.Store, &o.Store) } func (m *StoreSnapshot) Marshal() (dAtA []byte, err error) { @@ -419,24 +418,6 @@ func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Snapshot(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Snapshot(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/github.com/docker/swarmkit/api/specs.pb.go b/vendor/github.com/docker/swarmkit/api/specs.pb.go index dfd18a6d7..2930afc6c 100644 --- a/vendor/github.com/docker/swarmkit/api/specs.pb.go +++ b/vendor/github.com/docker/swarmkit/api/specs.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/docker/swarmkit/api/specs.proto -// DO NOT EDIT! 
package api @@ -12,11 +11,11 @@ import google_protobuf1 "github.com/gogo/protobuf/types" import google_protobuf3 "github.com/gogo/protobuf/types" import google_protobuf4 "github.com/gogo/protobuf/types" -import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" +import deepcopy "github.com/docker/swarmkit/api/deepcopy" import strings "strings" import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" +import sortkeys "github.com/gogo/protobuf/sortkeys" import io "io" @@ -885,7 +884,7 @@ func (m *NodeSpec) CopyFrom(src interface{}) { o := src.(*NodeSpec) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.Annotations, &o.Annotations) } func (m *ServiceSpec) Copy() *ServiceSpec { @@ -901,27 +900,27 @@ func (m *ServiceSpec) CopyFrom(src interface{}) { o := src.(*ServiceSpec) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Task, &o.Task) + deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.Task, &o.Task) if o.Update != nil { m.Update = &UpdateConfig{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Update, o.Update) + deepcopy.Copy(m.Update, o.Update) } if o.Rollback != nil { m.Rollback = &UpdateConfig{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Rollback, o.Rollback) + deepcopy.Copy(m.Rollback, o.Rollback) } if o.Networks != nil { m.Networks = make([]*NetworkAttachmentConfig, len(o.Networks)) for i := range m.Networks { m.Networks[i] = &NetworkAttachmentConfig{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i]) + deepcopy.Copy(m.Networks[i], o.Networks[i]) } } if o.Endpoint != nil { m.Endpoint = &EndpointSpec{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Endpoint, o.Endpoint) + deepcopy.Copy(m.Endpoint, o.Endpoint) } if o.Mode != nil { switch o.Mode.(type) { @@ -929,13 +928,13 @@ func (m *ServiceSpec) CopyFrom(src interface{}) { v := ServiceSpec_Replicated{ Replicated: &ReplicatedService{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Replicated, o.GetReplicated()) + deepcopy.Copy(v.Replicated, o.GetReplicated()) m.Mode = &v case *ServiceSpec_Global: v := ServiceSpec_Global{ Global: &GlobalService{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Global, o.GetGlobal()) + deepcopy.Copy(v.Global, o.GetGlobal()) m.Mode = &v } } @@ -982,32 +981,32 @@ func (m *TaskSpec) CopyFrom(src interface{}) { *m = *o if o.Resources != nil { m.Resources = &ResourceRequirements{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Resources, o.Resources) + deepcopy.Copy(m.Resources, o.Resources) } if o.Restart != nil { m.Restart = &RestartPolicy{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Restart, o.Restart) + deepcopy.Copy(m.Restart, o.Restart) } if o.Placement != nil { m.Placement = &Placement{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Placement, o.Placement) + deepcopy.Copy(m.Placement, o.Placement) } if o.LogDriver != nil { m.LogDriver = &Driver{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.LogDriver, o.LogDriver) + deepcopy.Copy(m.LogDriver, o.LogDriver) } if o.Networks != nil { m.Networks = make([]*NetworkAttachmentConfig, len(o.Networks)) for i := range m.Networks { m.Networks[i] = &NetworkAttachmentConfig{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i]) + deepcopy.Copy(m.Networks[i], o.Networks[i]) } } if o.ResourceReferences != nil { m.ResourceReferences = 
make([]ResourceReference, len(o.ResourceReferences)) for i := range m.ResourceReferences { - github_com_docker_swarmkit_api_deepcopy.Copy(&m.ResourceReferences[i], &o.ResourceReferences[i]) + deepcopy.Copy(&m.ResourceReferences[i], &o.ResourceReferences[i]) } } @@ -1017,19 +1016,19 @@ func (m *TaskSpec) CopyFrom(src interface{}) { v := TaskSpec_Attachment{ Attachment: &NetworkAttachmentSpec{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Attachment, o.GetAttachment()) + deepcopy.Copy(v.Attachment, o.GetAttachment()) m.Runtime = &v case *TaskSpec_Container: v := TaskSpec_Container{ Container: &ContainerSpec{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Container, o.GetContainer()) + deepcopy.Copy(v.Container, o.GetContainer()) m.Runtime = &v case *TaskSpec_Generic: v := TaskSpec_Generic{ Generic: &GenericRuntimeSpec{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Generic, o.GetGeneric()) + deepcopy.Copy(v.Generic, o.GetGeneric()) m.Runtime = &v } } @@ -1066,7 +1065,7 @@ func (m *GenericRuntimeSpec) CopyFrom(src interface{}) { *m = *o if o.Payload != nil { m.Payload = &google_protobuf3.Any{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Payload, o.Payload) + deepcopy.Copy(m.Payload, o.Payload) } } @@ -1127,32 +1126,32 @@ func (m *ContainerSpec) CopyFrom(src interface{}) { if o.Privileges != nil { m.Privileges = &Privileges{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Privileges, o.Privileges) + deepcopy.Copy(m.Privileges, o.Privileges) } if o.Init != nil { m.Init = &google_protobuf4.BoolValue{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Init, o.Init) + deepcopy.Copy(m.Init, o.Init) } if o.Mounts != nil { m.Mounts = make([]Mount, len(o.Mounts)) for i := range m.Mounts { - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Mounts[i], &o.Mounts[i]) + deepcopy.Copy(&m.Mounts[i], &o.Mounts[i]) } } if o.StopGracePeriod != nil { m.StopGracePeriod = &google_protobuf1.Duration{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.StopGracePeriod, o.StopGracePeriod) + deepcopy.Copy(m.StopGracePeriod, o.StopGracePeriod) } if o.PullOptions != nil { m.PullOptions = &ContainerSpec_PullOptions{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.PullOptions, o.PullOptions) + deepcopy.Copy(m.PullOptions, o.PullOptions) } if o.Secrets != nil { m.Secrets = make([]*SecretReference, len(o.Secrets)) for i := range m.Secrets { m.Secrets[i] = &SecretReference{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + deepcopy.Copy(m.Secrets[i], o.Secrets[i]) } } @@ -1160,7 +1159,7 @@ func (m *ContainerSpec) CopyFrom(src interface{}) { m.Configs = make([]*ConfigReference, len(o.Configs)) for i := range m.Configs { m.Configs[i] = &ConfigReference{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Configs[i], o.Configs[i]) + deepcopy.Copy(m.Configs[i], o.Configs[i]) } } @@ -1171,11 +1170,11 @@ func (m *ContainerSpec) CopyFrom(src interface{}) { if o.DNSConfig != nil { m.DNSConfig = &ContainerSpec_DNSConfig{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.DNSConfig, o.DNSConfig) + deepcopy.Copy(m.DNSConfig, o.DNSConfig) } if o.Healthcheck != nil { m.Healthcheck = &HealthConfig{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Healthcheck, o.Healthcheck) + deepcopy.Copy(m.Healthcheck, o.Healthcheck) } } @@ -1241,7 +1240,7 @@ func (m *EndpointSpec) CopyFrom(src interface{}) { m.Ports = make([]*PortConfig, len(o.Ports)) for i := range m.Ports { m.Ports[i] = &PortConfig{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Ports[i], o.Ports[i]) + deepcopy.Copy(m.Ports[i], 
o.Ports[i]) } } @@ -1260,14 +1259,14 @@ func (m *NetworkSpec) CopyFrom(src interface{}) { o := src.(*NetworkSpec) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.Annotations, &o.Annotations) if o.DriverConfig != nil { m.DriverConfig = &Driver{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.DriverConfig, o.DriverConfig) + deepcopy.Copy(m.DriverConfig, o.DriverConfig) } if o.IPAM != nil { m.IPAM = &IPAMOptions{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.IPAM, o.IPAM) + deepcopy.Copy(m.IPAM, o.IPAM) } if o.ConfigFrom != nil { switch o.ConfigFrom.(type) { @@ -1294,14 +1293,14 @@ func (m *ClusterSpec) CopyFrom(src interface{}) { o := src.(*ClusterSpec) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.AcceptancePolicy, &o.AcceptancePolicy) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Orchestration, &o.Orchestration) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Raft, &o.Raft) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Dispatcher, &o.Dispatcher) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.CAConfig, &o.CAConfig) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.TaskDefaults, &o.TaskDefaults) - github_com_docker_swarmkit_api_deepcopy.Copy(&m.EncryptionConfig, &o.EncryptionConfig) + deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.AcceptancePolicy, &o.AcceptancePolicy) + deepcopy.Copy(&m.Orchestration, &o.Orchestration) + deepcopy.Copy(&m.Raft, &o.Raft) + deepcopy.Copy(&m.Dispatcher, &o.Dispatcher) + deepcopy.Copy(&m.CAConfig, &o.CAConfig) + deepcopy.Copy(&m.TaskDefaults, &o.TaskDefaults) + deepcopy.Copy(&m.EncryptionConfig, &o.EncryptionConfig) } func (m *SecretSpec) Copy() *SecretSpec { @@ -1317,18 +1316,18 @@ func (m *SecretSpec) CopyFrom(src interface{}) { o := src.(*SecretSpec) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.Annotations, &o.Annotations) if o.Data != nil { m.Data = make([]byte, len(o.Data)) copy(m.Data, o.Data) } if o.Templating != nil { m.Templating = &Driver{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Templating, o.Templating) + deepcopy.Copy(m.Templating, o.Templating) } if o.Driver != nil { m.Driver = &Driver{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Driver, o.Driver) + deepcopy.Copy(m.Driver, o.Driver) } } @@ -1345,14 +1344,14 @@ func (m *ConfigSpec) CopyFrom(src interface{}) { o := src.(*ConfigSpec) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.Annotations, &o.Annotations) if o.Data != nil { m.Data = make([]byte, len(o.Data)) copy(m.Data, o.Data) } if o.Templating != nil { m.Templating = &Driver{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Templating, o.Templating) + deepcopy.Copy(m.Templating, o.Templating) } } @@ -2469,24 +2468,6 @@ func (m *ConfigSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Specs(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Specs(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - 
return offset + 4 -} func encodeVarintSpecs(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -3116,7 +3097,7 @@ func (this *ContainerSpec) String() string { for k, _ := range this.Labels { keysForLabels = append(keysForLabels, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + sortkeys.Strings(keysForLabels) mapStringForLabels := "map[string]string{" for _, k := range keysForLabels { mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) @@ -4554,51 +4535,14 @@ func (m *ContainerSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpecs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpecs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSpecs - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowSpecs @@ -4608,41 +4552,80 @@ func (m *ContainerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSpecs + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthSpecs + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthSpecs + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = 
entryPreIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSpecs - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { diff --git a/vendor/github.com/docker/swarmkit/api/types.pb.go b/vendor/github.com/docker/swarmkit/api/types.pb.go index f7d0cdd27..2ddc00756 100644 --- a/vendor/github.com/docker/swarmkit/api/types.pb.go +++ b/vendor/github.com/docker/swarmkit/api/types.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/docker/swarmkit/api/types.proto -// DO NOT EDIT! package api @@ -14,13 +13,14 @@ import _ "github.com/gogo/protobuf/gogoproto" import os "os" import time "time" -import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" +import deepcopy "github.com/docker/swarmkit/api/deepcopy" -import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" +import binary "encoding/binary" +import types "github.com/gogo/protobuf/types" import strings "strings" import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" +import sortkeys "github.com/gogo/protobuf/sortkeys" import io "io" @@ -54,9 +54,11 @@ func (x ResourceType) String() string { } func (ResourceType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } -// TaskState enumerates the states that a task progresses through within an -// agent. States are designed to be monotonically increasing, such that if two -// states are seen by a task, the greater of the new represents the true state. +// Only the manager create a NEW task, and move the task to PENDING and ASSIGNED. +// Afterward, the manager must rely on the agent to update the task status +// (pre-run: preparing, ready, starting; +// running; +// end-state: complete, shutdown, failed, rejected) type TaskState int32 const ( @@ -71,7 +73,9 @@ const ( TaskStateCompleted TaskState = 576 TaskStateShutdown TaskState = 640 TaskStateFailed TaskState = 704 - TaskStateRejected TaskState = 768 + // TaskStateRejected means a task never ran, for instance if something about + // the environment failed (e.g. setting up a port on that node failed). + TaskStateRejected TaskState = 768 // TaskStateRemove is used to correctly handle service deletions and scale // downs. 
This allows us to keep track of tasks that have been marked for // deletion, but can't yet be removed because the agent is in the process of @@ -578,15 +582,18 @@ type MaybeEncryptedRecord_Algorithm int32 const ( MaybeEncryptedRecord_NotEncrypted MaybeEncryptedRecord_Algorithm = 0 MaybeEncryptedRecord_NACLSecretboxSalsa20Poly1305 MaybeEncryptedRecord_Algorithm = 1 + MaybeEncryptedRecord_FernetAES128CBC MaybeEncryptedRecord_Algorithm = 2 ) var MaybeEncryptedRecord_Algorithm_name = map[int32]string{ 0: "NONE", 1: "SECRETBOX_SALSA20_POLY1305", + 2: "FERNET_AES_128_CBC", } var MaybeEncryptedRecord_Algorithm_value = map[string]int32{ "NONE": 0, "SECRETBOX_SALSA20_POLY1305": 1, + "FERNET_AES_128_CBC": 2, } func (x MaybeEncryptedRecord_Algorithm) String() string { @@ -850,6 +857,8 @@ type NodeDescription struct { Engine *EngineDescription `protobuf:"bytes,4,opt,name=engine" json:"engine,omitempty"` // Information on the node's TLS setup TLSInfo *NodeTLSInfo `protobuf:"bytes,5,opt,name=tls_info,json=tlsInfo" json:"tls_info,omitempty"` + // FIPS indicates whether the node has FIPS-enabled + FIPS bool `protobuf:"varint,6,opt,name=fips,proto3" json:"fips,omitempty"` } func (m *NodeDescription) Reset() { *m = NodeDescription{} } @@ -2245,7 +2254,7 @@ func (m *Annotations) CopyFrom(src interface{}) { if o.Indices != nil { m.Indices = make([]IndexEntry, len(o.Indices)) for i := range m.Indices { - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Indices[i], &o.Indices[i]) + deepcopy.Copy(&m.Indices[i], &o.Indices[i]) } } @@ -2300,13 +2309,13 @@ func (m *GenericResource) CopyFrom(src interface{}) { v := GenericResource_NamedResourceSpec{ NamedResourceSpec: &NamedGenericResource{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.NamedResourceSpec, o.GetNamedResourceSpec()) + deepcopy.Copy(v.NamedResourceSpec, o.GetNamedResourceSpec()) m.Resource = &v case *GenericResource_DiscreteResourceSpec: v := GenericResource_DiscreteResourceSpec{ DiscreteResourceSpec: &DiscreteGenericResource{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.DiscreteResourceSpec, o.GetDiscreteResourceSpec()) + deepcopy.Copy(v.DiscreteResourceSpec, o.GetDiscreteResourceSpec()) m.Resource = &v } } @@ -2330,7 +2339,7 @@ func (m *Resources) CopyFrom(src interface{}) { m.Generic = make([]*GenericResource, len(o.Generic)) for i := range m.Generic { m.Generic[i] = &GenericResource{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Generic[i], o.Generic[i]) + deepcopy.Copy(m.Generic[i], o.Generic[i]) } } @@ -2351,11 +2360,11 @@ func (m *ResourceRequirements) CopyFrom(src interface{}) { *m = *o if o.Limits != nil { m.Limits = &Resources{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Limits, o.Limits) + deepcopy.Copy(m.Limits, o.Limits) } if o.Reservations != nil { m.Reservations = &Resources{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Reservations, o.Reservations) + deepcopy.Copy(m.Reservations, o.Reservations) } } @@ -2412,7 +2421,7 @@ func (m *EngineDescription) CopyFrom(src interface{}) { if o.Plugins != nil { m.Plugins = make([]PluginDescription, len(o.Plugins)) for i := range m.Plugins { - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Plugins[i], &o.Plugins[i]) + deepcopy.Copy(&m.Plugins[i], &o.Plugins[i]) } } @@ -2433,19 +2442,19 @@ func (m *NodeDescription) CopyFrom(src interface{}) { *m = *o if o.Platform != nil { m.Platform = &Platform{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Platform, o.Platform) + deepcopy.Copy(m.Platform, o.Platform) } if o.Resources != nil { m.Resources = &Resources{} - 
github_com_docker_swarmkit_api_deepcopy.Copy(m.Resources, o.Resources) + deepcopy.Copy(m.Resources, o.Resources) } if o.Engine != nil { m.Engine = &EngineDescription{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Engine, o.Engine) + deepcopy.Copy(m.Engine, o.Engine) } if o.TLSInfo != nil { m.TLSInfo = &NodeTLSInfo{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.TLSInfo, o.TLSInfo) + deepcopy.Copy(m.TLSInfo, o.TLSInfo) } } @@ -2536,15 +2545,15 @@ func (m *Mount) CopyFrom(src interface{}) { *m = *o if o.BindOptions != nil { m.BindOptions = &Mount_BindOptions{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.BindOptions, o.BindOptions) + deepcopy.Copy(m.BindOptions, o.BindOptions) } if o.VolumeOptions != nil { m.VolumeOptions = &Mount_VolumeOptions{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.VolumeOptions, o.VolumeOptions) + deepcopy.Copy(m.VolumeOptions, o.VolumeOptions) } if o.TmpfsOptions != nil { m.TmpfsOptions = &Mount_TmpfsOptions{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.TmpfsOptions, o.TmpfsOptions) + deepcopy.Copy(m.TmpfsOptions, o.TmpfsOptions) } } @@ -2585,7 +2594,7 @@ func (m *Mount_VolumeOptions) CopyFrom(src interface{}) { if o.DriverConfig != nil { m.DriverConfig = &Driver{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.DriverConfig, o.DriverConfig) + deepcopy.Copy(m.DriverConfig, o.DriverConfig) } } @@ -2619,11 +2628,11 @@ func (m *RestartPolicy) CopyFrom(src interface{}) { *m = *o if o.Delay != nil { m.Delay = &google_protobuf1.Duration{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Delay, o.Delay) + deepcopy.Copy(m.Delay, o.Delay) } if o.Window != nil { m.Window = &google_protobuf1.Duration{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Window, o.Window) + deepcopy.Copy(m.Window, o.Window) } } @@ -2640,10 +2649,10 @@ func (m *UpdateConfig) CopyFrom(src interface{}) { o := src.(*UpdateConfig) *m = *o - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Delay, &o.Delay) + deepcopy.Copy(&m.Delay, &o.Delay) if o.Monitor != nil { m.Monitor = &google_protobuf1.Duration{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Monitor, o.Monitor) + deepcopy.Copy(m.Monitor, o.Monitor) } } @@ -2662,11 +2671,11 @@ func (m *UpdateStatus) CopyFrom(src interface{}) { *m = *o if o.StartedAt != nil { m.StartedAt = &google_protobuf.Timestamp{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.StartedAt, o.StartedAt) + deepcopy.Copy(m.StartedAt, o.StartedAt) } if o.CompletedAt != nil { m.CompletedAt = &google_protobuf.Timestamp{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.CompletedAt, o.CompletedAt) + deepcopy.Copy(m.CompletedAt, o.CompletedAt) } } @@ -2702,7 +2711,7 @@ func (m *PortStatus) CopyFrom(src interface{}) { m.Ports = make([]*PortConfig, len(o.Ports)) for i := range m.Ports { m.Ports[i] = &PortConfig{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Ports[i], o.Ports[i]) + deepcopy.Copy(m.Ports[i], o.Ports[i]) } } @@ -2723,15 +2732,15 @@ func (m *TaskStatus) CopyFrom(src interface{}) { *m = *o if o.Timestamp != nil { m.Timestamp = &google_protobuf.Timestamp{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Timestamp, o.Timestamp) + deepcopy.Copy(m.Timestamp, o.Timestamp) } if o.PortStatus != nil { m.PortStatus = &PortStatus{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.PortStatus, o.PortStatus) + deepcopy.Copy(m.PortStatus, o.PortStatus) } if o.AppliedAt != nil { m.AppliedAt = &google_protobuf.Timestamp{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.AppliedAt, o.AppliedAt) + deepcopy.Copy(m.AppliedAt, o.AppliedAt) } if o.RuntimeStatus != nil 
{ switch o.RuntimeStatus.(type) { @@ -2739,7 +2748,7 @@ func (m *TaskStatus) CopyFrom(src interface{}) { v := TaskStatus_Container{ Container: &ContainerStatus{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Container, o.GetContainer()) + deepcopy.Copy(v.Container, o.GetContainer()) m.RuntimeStatus = &v } } @@ -2852,13 +2861,13 @@ func (m *IPAMOptions) CopyFrom(src interface{}) { *m = *o if o.Driver != nil { m.Driver = &Driver{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Driver, o.Driver) + deepcopy.Copy(m.Driver, o.Driver) } if o.Configs != nil { m.Configs = make([]*IPAMConfig, len(o.Configs)) for i := range m.Configs { m.Configs[i] = &IPAMConfig{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Configs[i], o.Configs[i]) + deepcopy.Copy(m.Configs[i], o.Configs[i]) } } @@ -2894,7 +2903,7 @@ func (m *WeightedPeer) CopyFrom(src interface{}) { *m = *o if o.Peer != nil { m.Peer = &Peer{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Peer, o.Peer) + deepcopy.Copy(m.Peer, o.Peer) } } @@ -2930,7 +2939,7 @@ func (m *AcceptancePolicy) CopyFrom(src interface{}) { m.Policies = make([]*AcceptancePolicy_RoleAdmissionPolicy, len(o.Policies)) for i := range m.Policies { m.Policies[i] = &AcceptancePolicy_RoleAdmissionPolicy{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Policies[i], o.Policies[i]) + deepcopy.Copy(m.Policies[i], o.Policies[i]) } } @@ -2951,7 +2960,7 @@ func (m *AcceptancePolicy_RoleAdmissionPolicy) CopyFrom(src interface{}) { *m = *o if o.Secret != nil { m.Secret = &AcceptancePolicy_RoleAdmissionPolicy_Secret{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret) + deepcopy.Copy(m.Secret, o.Secret) } } @@ -3015,13 +3024,13 @@ func (m *CAConfig) CopyFrom(src interface{}) { *m = *o if o.NodeCertExpiry != nil { m.NodeCertExpiry = &google_protobuf1.Duration{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.NodeCertExpiry, o.NodeCertExpiry) + deepcopy.Copy(m.NodeCertExpiry, o.NodeCertExpiry) } if o.ExternalCAs != nil { m.ExternalCAs = make([]*ExternalCA, len(o.ExternalCAs)) for i := range m.ExternalCAs { m.ExternalCAs[i] = &ExternalCA{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.ExternalCAs[i], o.ExternalCAs[i]) + deepcopy.Copy(m.ExternalCAs[i], o.ExternalCAs[i]) } } @@ -3065,7 +3074,7 @@ func (m *TaskDefaults) CopyFrom(src interface{}) { *m = *o if o.LogDriver != nil { m.LogDriver = &Driver{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.LogDriver, o.LogDriver) + deepcopy.Copy(m.LogDriver, o.LogDriver) } } @@ -3084,7 +3093,7 @@ func (m *DispatcherConfig) CopyFrom(src interface{}) { *m = *o if o.HeartbeatPeriod != nil { m.HeartbeatPeriod = &google_protobuf1.Duration{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.HeartbeatPeriod, o.HeartbeatPeriod) + deepcopy.Copy(m.HeartbeatPeriod, o.HeartbeatPeriod) } } @@ -3152,7 +3161,7 @@ func (m *PlacementPreference) CopyFrom(src interface{}) { v := PlacementPreference_Spread{ Spread: &SpreadOver{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Spread, o.GetSpread()) + deepcopy.Copy(v.Spread, o.GetSpread()) m.Preference = &v } } @@ -3181,7 +3190,7 @@ func (m *Placement) CopyFrom(src interface{}) { m.Preferences = make([]*PlacementPreference, len(o.Preferences)) for i := range m.Preferences { m.Preferences[i] = &PlacementPreference{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Preferences[i], o.Preferences[i]) + deepcopy.Copy(m.Preferences[i], o.Preferences[i]) } } @@ -3189,7 +3198,7 @@ func (m *Placement) CopyFrom(src interface{}) { m.Platforms = make([]*Platform, len(o.Platforms)) for i := range 
m.Platforms { m.Platforms[i] = &Platform{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Platforms[i], o.Platforms[i]) + deepcopy.Copy(m.Platforms[i], o.Platforms[i]) } } @@ -3231,10 +3240,10 @@ func (m *RootCA) CopyFrom(src interface{}) { m.CACert = make([]byte, len(o.CACert)) copy(m.CACert, o.CACert) } - github_com_docker_swarmkit_api_deepcopy.Copy(&m.JoinTokens, &o.JoinTokens) + deepcopy.Copy(&m.JoinTokens, &o.JoinTokens) if o.RootRotation != nil { m.RootRotation = &RootRotation{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.RootRotation, o.RootRotation) + deepcopy.Copy(m.RootRotation, o.RootRotation) } } @@ -3255,7 +3264,7 @@ func (m *Certificate) CopyFrom(src interface{}) { m.CSR = make([]byte, len(o.CSR)) copy(m.CSR, o.CSR) } - github_com_docker_swarmkit_api_deepcopy.Copy(&m.Status, &o.Status) + deepcopy.Copy(&m.Status, &o.Status) if o.Certificate != nil { m.Certificate = make([]byte, len(o.Certificate)) copy(m.Certificate, o.Certificate) @@ -3330,7 +3339,7 @@ func (m *SecretReference) CopyFrom(src interface{}) { v := SecretReference_File{ File: &FileTarget{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.File, o.GetFile()) + deepcopy.Copy(v.File, o.GetFile()) m.Target = &v } } @@ -3356,7 +3365,7 @@ func (m *ConfigReference) CopyFrom(src interface{}) { v := ConfigReference_File{ File: &FileTarget{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.File, o.GetFile()) + deepcopy.Copy(v.File, o.GetFile()) m.Target = &v } } @@ -3378,7 +3387,7 @@ func (m *BlacklistedCertificate) CopyFrom(src interface{}) { *m = *o if o.Expiry != nil { m.Expiry = &google_protobuf.Timestamp{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Expiry, o.Expiry) + deepcopy.Copy(m.Expiry, o.Expiry) } } @@ -3402,15 +3411,15 @@ func (m *HealthConfig) CopyFrom(src interface{}) { if o.Interval != nil { m.Interval = &google_protobuf1.Duration{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Interval, o.Interval) + deepcopy.Copy(m.Interval, o.Interval) } if o.Timeout != nil { m.Timeout = &google_protobuf1.Duration{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Timeout, o.Timeout) + deepcopy.Copy(m.Timeout, o.Timeout) } if o.StartPeriod != nil { m.StartPeriod = &google_protobuf1.Duration{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.StartPeriod, o.StartPeriod) + deepcopy.Copy(m.StartPeriod, o.StartPeriod) } } @@ -3479,11 +3488,11 @@ func (m *Privileges) CopyFrom(src interface{}) { *m = *o if o.CredentialSpec != nil { m.CredentialSpec = &Privileges_CredentialSpec{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.CredentialSpec, o.CredentialSpec) + deepcopy.Copy(m.CredentialSpec, o.CredentialSpec) } if o.SELinuxContext != nil { m.SELinuxContext = &Privileges_SELinuxContext{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.SELinuxContext, o.SELinuxContext) + deepcopy.Copy(m.SELinuxContext, o.SELinuxContext) } } @@ -4002,6 +4011,16 @@ func (m *NodeDescription) MarshalTo(dAtA []byte) (int, error) { } i += n9 } + if m.FIPS { + dAtA[i] = 0x30 + i++ + if m.FIPS { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -4395,8 +4414,8 @@ func (m *UpdateConfig) MarshalTo(dAtA []byte) (int, error) { } dAtA[i] = 0x12 i++ - i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(m.Delay))) - n16, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Delay, dAtA[i:]) + i = encodeVarintTypes(dAtA, i, uint64(types.SizeOfStdDuration(m.Delay))) + n16, err := types.StdDurationMarshalTo(m.Delay, dAtA[i:]) if err != nil { return 0, err } @@ -4419,7 +4438,8 @@ 
func (m *UpdateConfig) MarshalTo(dAtA []byte) (int, error) { if m.MaxFailureRatio != 0 { dAtA[i] = 0x2d i++ - i = encodeFixed32Types(dAtA, i, uint32(math.Float32bits(float32(m.MaxFailureRatio)))) + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.MaxFailureRatio)))) + i += 4 } if m.Order != 0 { dAtA[i] = 0x30 @@ -6125,24 +6145,6 @@ func (m *Privileges_SELinuxContext) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Types(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Types(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -6361,6 +6363,9 @@ func (m *NodeDescription) Size() (n int) { l = m.TLSInfo.Size() n += 1 + l + sovTypes(uint64(l)) } + if m.FIPS { + n += 2 + } return n } @@ -6528,7 +6533,7 @@ func (m *UpdateConfig) Size() (n int) { if m.Parallelism != 0 { n += 1 + sovTypes(uint64(m.Parallelism)) } - l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Delay) + l = types.SizeOfStdDuration(m.Delay) n += 1 + l + sovTypes(uint64(l)) if m.FailureAction != 0 { n += 1 + sovTypes(uint64(m.FailureAction)) @@ -7352,7 +7357,7 @@ func (this *Annotations) String() string { for k, _ := range this.Labels { keysForLabels = append(keysForLabels, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + sortkeys.Strings(keysForLabels) mapStringForLabels := "map[string]string{" for _, k := range keysForLabels { mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) @@ -7471,7 +7476,7 @@ func (this *EngineDescription) String() string { for k, _ := range this.Labels { keysForLabels = append(keysForLabels, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + sortkeys.Strings(keysForLabels) mapStringForLabels := "map[string]string{" for _, k := range keysForLabels { mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) @@ -7495,6 +7500,7 @@ func (this *NodeDescription) String() string { `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Resources", "Resources", 1) + `,`, `Engine:` + strings.Replace(fmt.Sprintf("%v", this.Engine), "EngineDescription", "EngineDescription", 1) + `,`, `TLSInfo:` + strings.Replace(fmt.Sprintf("%v", this.TLSInfo), "NodeTLSInfo", "NodeTLSInfo", 1) + `,`, + `FIPS:` + fmt.Sprintf("%v", this.FIPS) + `,`, `}`, }, "") return s @@ -7580,7 +7586,7 @@ func (this *Mount_VolumeOptions) String() string { for k, _ := range this.Labels { keysForLabels = append(keysForLabels, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + sortkeys.Strings(keysForLabels) mapStringForLabels := "map[string]string{" for _, k := range keysForLabels { mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) @@ -7703,7 +7709,7 @@ func (this *NetworkAttachmentConfig) String() string { for k, _ := range this.DriverAttachmentOpts { keysForDriverAttachmentOpts = append(keysForDriverAttachmentOpts, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForDriverAttachmentOpts) + sortkeys.Strings(keysForDriverAttachmentOpts) 
mapStringForDriverAttachmentOpts := "map[string]string{" for _, k := range keysForDriverAttachmentOpts { mapStringForDriverAttachmentOpts += fmt.Sprintf("%v: %v,", k, this.DriverAttachmentOpts[k]) @@ -7726,7 +7732,7 @@ func (this *IPAMConfig) String() string { for k, _ := range this.Reserved { keysForReserved = append(keysForReserved, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForReserved) + sortkeys.Strings(keysForReserved) mapStringForReserved := "map[string]string{" for _, k := range keysForReserved { mapStringForReserved += fmt.Sprintf("%v: %v,", k, this.Reserved[k]) @@ -7764,7 +7770,7 @@ func (this *Driver) String() string { for k, _ := range this.Options { keysForOptions = append(keysForOptions, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + sortkeys.Strings(keysForOptions) mapStringForOptions := "map[string]string{" for _, k := range keysForOptions { mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) @@ -7862,7 +7868,7 @@ func (this *ExternalCA) String() string { for k, _ := range this.Options { keysForOptions = append(keysForOptions, k) } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + sortkeys.Strings(keysForOptions) mapStringForOptions := "map[string]string{" for _, k := range keysForOptions { mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) @@ -8482,51 +8488,14 @@ func (m *Annotations) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8536,41 +8505,80 @@ func (m *Annotations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - 
iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -9479,51 +9487,14 @@ func (m *EngineDescription) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -9533,41 +9504,80 @@ func (m *EngineDescription) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + 
intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -9811,6 +9821,26 @@ func (m *NodeDescription) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FIPS", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FIPS = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -10708,51 +10738,14 @@ func (m *Mount_VolumeOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex 
:= iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10762,41 +10755,80 @@ func (m *Mount_VolumeOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -11168,7 +11200,7 @@ func (m *UpdateConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Delay, dAtA[iNdEx:postIndex]); err != nil { + if err := types.StdDurationUnmarshal(&m.Delay, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11232,11 +11264,8 @@ func (m *UpdateConfig) Unmarshal(dAtA []byte) error { if (iNdEx + 4) > l { return io.ErrUnexpectedEOF } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) iNdEx += 4 - v = uint32(dAtA[iNdEx-4]) - v |= uint32(dAtA[iNdEx-3]) << 8 - v |= uint32(dAtA[iNdEx-2]) << 16 - v |= uint32(dAtA[iNdEx-1]) << 24 m.MaxFailureRatio = float32(math.Float32frombits(v)) case 6: if wireType != 0 { @@ -12069,51 +12098,14 @@ func (m *NetworkAttachmentConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.DriverAttachmentOpts == nil { m.DriverAttachmentOpts = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12123,41 +12115,80 @@ func (m *NetworkAttachmentConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.DriverAttachmentOpts[mapkey] = mapvalue - } else { - var mapvalue string - 
m.DriverAttachmentOpts[mapkey] = mapvalue } + m.DriverAttachmentOpts[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -12341,51 +12372,14 @@ func (m *IPAMConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Reserved == nil { m.Reserved = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12395,41 +12389,80 @@ func (m *IPAMConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapvalue := 
iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Reserved[mapkey] = mapvalue - } else { - var mapvalue string - m.Reserved[mapkey] = mapvalue } + m.Reserved[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -12691,51 +12724,14 @@ func (m *Driver) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Options == nil { m.Options = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12745,41 +12741,80 @@ func (m *Driver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + 
skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Options[mapkey] = mapvalue - } else { - var mapvalue string - m.Options[mapkey] = mapvalue } + m.Options[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -13640,51 +13675,14 @@ func (m *ExternalCA) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Options == nil { m.Options = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -13694,41 +13692,80 @@ func (m *ExternalCA) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTypes - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Options[mapkey] = mapvalue - } else { - var mapvalue string - m.Options[mapkey] = mapvalue } + m.Options[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -17043,321 +17080,325 @@ var ( func init() { proto.RegisterFile("github.com/docker/swarmkit/api/types.proto", fileDescriptorTypes) } var fileDescriptorTypes = []byte{ - // 5054 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0x4d, 0x6c, 0x24, 0x49, - 0x56, 0x76, 0xfd, 0xba, 0xea, 0x55, 0xd9, 0x4e, 0x47, 0x7b, 0x7b, 0xdc, 0xb5, 0xdd, 0x76, 0x4d, - 0xce, 0xf4, 0xce, 0x6c, 0x6f, 0x53, 0xfd, 0xb7, 0xbb, 0xea, 0x99, 0x61, 0x77, 0xa6, 0xfe, 0x6c, - 0xd7, 0xb6, 0x5d, 0x55, 0x8a, 0x2a, 0x77, 0xef, 0x22, 0x41, 0x92, 0xce, 0x0c, 0x97, 0x73, 0x9c, - 0x95, 0x51, 0x64, 0x66, 0xd9, 0x5d, 0x2c, 0x88, 0x16, 0x07, 0x40, 0x3e, 0xc1, 0x89, 0x45, 0xc8, - 0x08, 0x09, 0x8e, 0x48, 0x1c, 0x40, 0x42, 0x70, 0x1a, 0x24, 0x84, 0xf6, 0x06, 0x0b, 0x12, 0x5a, - 0x81, 0x64, 0x58, 0x1f, 0xb8, 0xad, 0xe0, 0x82, 0xb8, 0x80, 0x84, 0xe2, 0x27, 0xb3, 0xd2, 0xd5, - 0x69, 0xbb, 0x87, 0xdd, 0x8b, 0x5d, 0xf1, 0xde, 0xf7, 0x5e, 0x44, 0xbc, 0x88, 0x78, 0xf1, 0xde, - 0x8b, 0x84, 0x7b, 0x03, 0xcb, 0x3f, 0x18, 0xef, 0x55, 0x0c, 0x3a, 0x7c, 0x60, 0x52, 0xe3, 0x90, - 0xb8, 0x0f, 0xbc, 0x63, 0xdd, 0x1d, 0x1e, 0x5a, 0xfe, 0x03, 0x7d, 0x64, 0x3d, 0xf0, 0x27, 0x23, - 0xe2, 0x55, 0x46, 0x2e, 0xf5, 0x29, 0x42, 0x02, 0x50, 0x09, 0x00, 0x95, 0xa3, 0x47, 0xa5, 0xf5, - 0x01, 0xa5, 0x03, 0x9b, 0x3c, 0xe0, 0x88, 0xbd, 0xf1, 0xfe, 0x03, 0xdf, 0x1a, 0x12, 0xcf, 0xd7, - 0x87, 0x23, 0x21, 0x54, 0x5a, 0x9b, 0x05, 0x98, 0x63, 0x57, 0xf7, 0x2d, 0xea, 0x48, 0xfe, 0xca, - 0x80, 0x0e, 0x28, 0xff, 0xf9, 0x80, 0xfd, 0x12, 0x54, 0x75, 0x1d, 0xe6, 0x9f, 0x13, 0xd7, 0xb3, - 0xa8, 0x83, 0x56, 0x20, 0x63, 0x39, 0x26, 0x79, 0xb9, 0x9a, 0x28, 0x27, 0xde, 0x4f, 0x63, 0xd1, - 0x50, 0x1f, 0x02, 0xb4, 0xd8, 0x8f, 0xa6, 0xe3, 0xbb, 0x13, 0xa4, 0x40, 0xea, 0x90, 0x4c, 0x38, - 0x22, 0x8f, 0xd9, 0x4f, 0x46, 0x39, 0xd2, 0xed, 0xd5, 0xa4, 0xa0, 0x1c, 0xe9, 0xb6, 0xfa, 0xa3, - 0x04, 0x14, 0xaa, 0x8e, 0x43, 0x7d, 0xde, 0xbb, 0x87, 0x10, 0xa4, 0x1d, 0x7d, 0x48, 0xa4, 0x10, - 0xff, 0x8d, 0xea, 0x90, 0xb5, 0xf5, 0x3d, 0x62, 0x7b, 0xab, 0xc9, 0x72, 0xea, 0xfd, 0xc2, 0xe3, - 0xaf, 0x54, 0x5e, 0x9f, 0x72, 0x25, 0xa2, 0xa4, 0xb2, 0xcd, 0xd1, 0x7c, 0x10, 0x58, 0x8a, 0xa2, - 0x6f, 0xc2, 0xbc, 0xe5, 0x98, 0x96, 0x41, 0xbc, 0xd5, 0x34, 0xd7, 0xb2, 0x16, 0xa7, 0x65, 0x3a, - 0xfa, 0x5a, 0xfa, 0xfb, 0x67, 0xeb, 0x73, 0x38, 0x10, 0x2a, 0x7d, 0x00, 0x85, 0x88, 0xda, 0x98, - 0xb9, 0xad, 0x40, 0xe6, 0x48, 0xb7, 0xc7, 0x44, 0xce, 0x4e, 0x34, 0x3e, 0x4c, 0x3e, 0x4d, 0xa8, - 0x9f, 0xc0, 0x4a, 0x5b, 0x1f, 0x12, 0x73, 0x93, 0x38, 0xc4, 0xb5, 0x0c, 0x4c, 0x3c, 0x3a, 0x76, - 0x0d, 0xc2, 0xe6, 0x7a, 0x68, 0x39, 0x66, 0x30, 0x57, 0xf6, 0x3b, 0x5e, 0x8b, 0x5a, 0x87, 0xb7, - 0x1a, 0x96, 0x67, 0xb8, 0xc4, 0x27, 0x9f, 
0x5b, 0x49, 0x2a, 0x50, 0x72, 0x96, 0x80, 0xa5, 0x59, - 0xe9, 0x9f, 0x83, 0x1b, 0xcc, 0xc4, 0xa6, 0xe6, 0x4a, 0x8a, 0xe6, 0x8d, 0x88, 0xc1, 0x95, 0x15, - 0x1e, 0xbf, 0x1f, 0x67, 0xa1, 0xb8, 0x99, 0x6c, 0xcd, 0xe1, 0x65, 0xae, 0x26, 0x20, 0xf4, 0x46, - 0xc4, 0x40, 0x06, 0xdc, 0x34, 0xe5, 0xa0, 0x67, 0xd4, 0x27, 0xb9, 0xfa, 0xd8, 0x65, 0xbc, 0x64, - 0x9a, 0x5b, 0x73, 0x78, 0x25, 0x50, 0x16, 0xed, 0xa4, 0x06, 0x90, 0x0b, 0x74, 0xab, 0xdf, 0x4b, - 0x40, 0x3e, 0x60, 0x7a, 0xe8, 0xcb, 0x90, 0x77, 0x74, 0x87, 0x6a, 0xc6, 0x68, 0xec, 0xf1, 0x09, - 0xa5, 0x6a, 0xc5, 0xf3, 0xb3, 0xf5, 0x5c, 0x5b, 0x77, 0x68, 0xbd, 0xbb, 0xeb, 0xe1, 0x1c, 0x63, - 0xd7, 0x47, 0x63, 0x0f, 0xbd, 0x0d, 0xc5, 0x21, 0x19, 0x52, 0x77, 0xa2, 0xed, 0x4d, 0x7c, 0xe2, - 0x49, 0xb3, 0x15, 0x04, 0xad, 0xc6, 0x48, 0xe8, 0x1b, 0x30, 0x3f, 0x10, 0x43, 0x5a, 0x4d, 0xf1, - 0xed, 0xf3, 0x4e, 0xdc, 0xe8, 0x67, 0x46, 0x8d, 0x03, 0x19, 0xf5, 0xb7, 0x13, 0xb0, 0x12, 0x52, - 0xc9, 0x2f, 0x8d, 0x2d, 0x97, 0x0c, 0x89, 0xe3, 0x7b, 0xe8, 0x6b, 0x90, 0xb5, 0xad, 0xa1, 0xe5, - 0x7b, 0xd2, 0xe6, 0x77, 0xe2, 0xd4, 0x86, 0x93, 0xc2, 0x12, 0x8c, 0xaa, 0x50, 0x74, 0x89, 0x47, - 0xdc, 0x23, 0xb1, 0xe3, 0xa5, 0x45, 0xaf, 0x11, 0xbe, 0x20, 0xa2, 0x6e, 0x40, 0xae, 0x6b, 0xeb, - 0xfe, 0x3e, 0x75, 0x87, 0x48, 0x85, 0xa2, 0xee, 0x1a, 0x07, 0x96, 0x4f, 0x0c, 0x7f, 0xec, 0x06, - 0xa7, 0xef, 0x02, 0x0d, 0xdd, 0x84, 0x24, 0x15, 0x1d, 0xe5, 0x6b, 0xd9, 0xf3, 0xb3, 0xf5, 0x64, - 0xa7, 0x87, 0x93, 0xd4, 0x53, 0x3f, 0x82, 0xe5, 0xae, 0x3d, 0x1e, 0x58, 0x4e, 0x83, 0x78, 0x86, - 0x6b, 0x8d, 0x98, 0x76, 0xb6, 0x2b, 0x99, 0x8f, 0x0a, 0x76, 0x25, 0xfb, 0x1d, 0x1e, 0xed, 0xe4, - 0xf4, 0x68, 0xab, 0xbf, 0x99, 0x84, 0xe5, 0xa6, 0x33, 0xb0, 0x1c, 0x12, 0x95, 0xbe, 0x0b, 0x8b, - 0x84, 0x13, 0xb5, 0x23, 0xe1, 0x6e, 0xa4, 0x9e, 0x05, 0x41, 0x0d, 0x7c, 0x50, 0x6b, 0xc6, 0x2f, - 0x3c, 0x8a, 0x9b, 0xfe, 0x6b, 0xda, 0x63, 0xbd, 0x43, 0x13, 0xe6, 0x47, 0x7c, 0x12, 0x9e, 0x5c, - 0xde, 0xbb, 0x71, 0xba, 0x5e, 0x9b, 0x67, 0xe0, 0x24, 0xa4, 0xec, 0x4f, 0xe2, 0x24, 0xfe, 0x24, - 0x09, 0x4b, 0x6d, 0x6a, 0x5e, 0xb0, 0x43, 0x09, 0x72, 0x07, 0xd4, 0xf3, 0x23, 0x0e, 0x31, 0x6c, - 0xa3, 0xa7, 0x90, 0x1b, 0xc9, 0xe5, 0x93, 0xab, 0x7f, 0x3b, 0x7e, 0xc8, 0x02, 0x83, 0x43, 0x34, - 0xfa, 0x08, 0xf2, 0xc1, 0x91, 0x61, 0xb3, 0x7d, 0x83, 0x8d, 0x33, 0xc5, 0xa3, 0x6f, 0x40, 0x56, - 0x2c, 0xc2, 0x6a, 0x9a, 0x4b, 0xde, 0x7d, 0x23, 0x9b, 0x63, 0x29, 0x84, 0x36, 0x21, 0xe7, 0xdb, - 0x9e, 0x66, 0x39, 0xfb, 0x74, 0x35, 0xc3, 0x15, 0xac, 0xc7, 0x3a, 0x19, 0x6a, 0x92, 0xfe, 0x76, - 0xaf, 0xe5, 0xec, 0xd3, 0x5a, 0xe1, 0xfc, 0x6c, 0x7d, 0x5e, 0x36, 0xf0, 0xbc, 0x6f, 0x7b, 0xec, - 0x87, 0xfa, 0x3b, 0x09, 0x28, 0x44, 0x50, 0xe8, 0x0e, 0x80, 0xef, 0x8e, 0x3d, 0x5f, 0x73, 0x29, - 0xf5, 0xb9, 0xb1, 0x8a, 0x38, 0xcf, 0x29, 0x98, 0x52, 0x1f, 0x55, 0xe0, 0x86, 0x41, 0x5c, 0x5f, - 0xb3, 0x3c, 0x6f, 0x4c, 0x5c, 0xcd, 0x1b, 0xef, 0x7d, 0x4a, 0x0c, 0x9f, 0x1b, 0xae, 0x88, 0x97, - 0x19, 0xab, 0xc5, 0x39, 0x3d, 0xc1, 0x40, 0x4f, 0xe0, 0x66, 0x14, 0x3f, 0x1a, 0xef, 0xd9, 0x96, - 0xa1, 0xb1, 0xc5, 0x4c, 0x71, 0x91, 0x1b, 0x53, 0x91, 0x2e, 0xe7, 0x3d, 0x23, 0x13, 0xf5, 0x87, - 0x09, 0x50, 0xb0, 0xbe, 0xef, 0xef, 0x90, 0xe1, 0x1e, 0x71, 0x7b, 0xbe, 0xee, 0x8f, 0x3d, 0x74, - 0x13, 0xb2, 0x36, 0xd1, 0x4d, 0xe2, 0xf2, 0x41, 0xe5, 0xb0, 0x6c, 0xa1, 0x5d, 0x76, 0x82, 0x75, - 0xe3, 0x40, 0xdf, 0xb3, 0x6c, 0xcb, 0x9f, 0xf0, 0xa1, 0x2c, 0xc6, 0x6f, 0xe1, 0x59, 0x9d, 0x15, - 0x1c, 0x11, 0xc4, 0x17, 0xd4, 0xa0, 0x55, 0x98, 0x1f, 0x12, 0xcf, 0xd3, 0x07, 0x84, 0x8f, 0x34, - 0x8f, 0x83, 0xa6, 0xfa, 0x11, 0x14, 0xa3, 0x72, 0xa8, 0x00, 0xf3, 
0xbb, 0xed, 0x67, 0xed, 0xce, - 0x8b, 0xb6, 0x32, 0x87, 0x96, 0xa0, 0xb0, 0xdb, 0xc6, 0xcd, 0x6a, 0x7d, 0xab, 0x5a, 0xdb, 0x6e, - 0x2a, 0x09, 0xb4, 0x00, 0xf9, 0x69, 0x33, 0xa9, 0xfe, 0x59, 0x02, 0x80, 0x99, 0x5b, 0x4e, 0xea, - 0x43, 0xc8, 0x78, 0xbe, 0xee, 0x8b, 0x5d, 0xb9, 0xf8, 0xf8, 0xdd, 0xcb, 0xd6, 0x50, 0x8e, 0x97, - 0xfd, 0x23, 0x58, 0x88, 0x44, 0x47, 0x98, 0xbc, 0x30, 0x42, 0xe6, 0x20, 0x74, 0xd3, 0x74, 0xe5, - 0xc0, 0xf9, 0x6f, 0xf5, 0x23, 0xc8, 0x70, 0xe9, 0x8b, 0xc3, 0xcd, 0x41, 0xba, 0xc1, 0x7e, 0x25, - 0x50, 0x1e, 0x32, 0xb8, 0x59, 0x6d, 0x7c, 0x47, 0x49, 0x22, 0x05, 0x8a, 0x8d, 0x56, 0xaf, 0xde, - 0x69, 0xb7, 0x9b, 0xf5, 0x7e, 0xb3, 0xa1, 0xa4, 0xd4, 0xbb, 0x90, 0x69, 0x0d, 0x99, 0xe6, 0xdb, - 0x6c, 0xcb, 0xef, 0x13, 0x97, 0x38, 0x46, 0x70, 0x92, 0xa6, 0x04, 0xf5, 0xc7, 0x05, 0xc8, 0xec, - 0xd0, 0xb1, 0xe3, 0xa3, 0xc7, 0x11, 0xb7, 0xb5, 0x18, 0x1f, 0x21, 0x70, 0x60, 0xa5, 0x3f, 0x19, - 0x11, 0xe9, 0xd6, 0x6e, 0x42, 0x56, 0x1c, 0x0e, 0x39, 0x1d, 0xd9, 0x62, 0x74, 0x5f, 0x77, 0x07, - 0xc4, 0x97, 0xf3, 0x91, 0x2d, 0xf4, 0x3e, 0xbb, 0xb1, 0x74, 0x93, 0x3a, 0xf6, 0x84, 0x9f, 0xa1, - 0x9c, 0xb8, 0x96, 0x30, 0xd1, 0xcd, 0x8e, 0x63, 0x4f, 0x70, 0xc8, 0x45, 0x5b, 0x50, 0xdc, 0xb3, - 0x1c, 0x53, 0xa3, 0x23, 0xe1, 0xe4, 0x33, 0x97, 0x9f, 0x38, 0x31, 0xaa, 0x9a, 0xe5, 0x98, 0x1d, - 0x01, 0xc6, 0x85, 0xbd, 0x69, 0x03, 0xb5, 0x61, 0xf1, 0x88, 0xda, 0xe3, 0x21, 0x09, 0x75, 0x65, - 0xb9, 0xae, 0xf7, 0x2e, 0xd7, 0xf5, 0x9c, 0xe3, 0x03, 0x6d, 0x0b, 0x47, 0xd1, 0x26, 0x7a, 0x06, - 0x0b, 0xfe, 0x70, 0xb4, 0xef, 0x85, 0xea, 0xe6, 0xb9, 0xba, 0x2f, 0x5d, 0x61, 0x30, 0x06, 0x0f, - 0xb4, 0x15, 0xfd, 0x48, 0x0b, 0x6d, 0x42, 0xc1, 0xa0, 0x8e, 0x67, 0x79, 0x3e, 0x71, 0x8c, 0xc9, - 0x6a, 0x8e, 0xdb, 0xfe, 0x8a, 0x59, 0xd6, 0xa7, 0x60, 0x1c, 0x95, 0x2c, 0xfd, 0x7a, 0x0a, 0x0a, - 0x11, 0x13, 0xa0, 0x1e, 0x14, 0x46, 0x2e, 0x1d, 0xe9, 0x03, 0x7e, 0xe3, 0xc9, 0x45, 0x7d, 0xf4, - 0x46, 0xe6, 0xab, 0x74, 0xa7, 0x82, 0x38, 0xaa, 0x45, 0x3d, 0x4d, 0x42, 0x21, 0xc2, 0x44, 0xf7, - 0x20, 0x87, 0xbb, 0xb8, 0xf5, 0xbc, 0xda, 0x6f, 0x2a, 0x73, 0xa5, 0xdb, 0x27, 0xa7, 0xe5, 0x55, - 0xae, 0x2d, 0xaa, 0xa0, 0xeb, 0x5a, 0x47, 0x6c, 0x0f, 0xbf, 0x0f, 0xf3, 0x01, 0x34, 0x51, 0xfa, - 0xe2, 0xc9, 0x69, 0xf9, 0xad, 0x59, 0x68, 0x04, 0x89, 0x7b, 0x5b, 0x55, 0xdc, 0x6c, 0x28, 0xc9, - 0x78, 0x24, 0xee, 0x1d, 0xe8, 0x2e, 0x31, 0xd1, 0x97, 0x20, 0x2b, 0x81, 0xa9, 0x52, 0xe9, 0xe4, - 0xb4, 0x7c, 0x73, 0x16, 0x38, 0xc5, 0xe1, 0xde, 0x76, 0xf5, 0x79, 0x53, 0x49, 0xc7, 0xe3, 0x70, - 0xcf, 0xd6, 0x8f, 0x08, 0x7a, 0x17, 0x32, 0x02, 0x96, 0x29, 0xdd, 0x3a, 0x39, 0x2d, 0x7f, 0xe1, - 0x35, 0x75, 0x0c, 0x55, 0x5a, 0xfd, 0xad, 0x3f, 0x5a, 0x9b, 0xfb, 0xab, 0x3f, 0x5e, 0x53, 0x66, - 0xd9, 0xa5, 0xff, 0x49, 0xc0, 0xc2, 0x85, 0xbd, 0x83, 0x54, 0xc8, 0x3a, 0xd4, 0xa0, 0x23, 0x71, - 0x11, 0xe6, 0x6a, 0x70, 0x7e, 0xb6, 0x9e, 0x6d, 0xd3, 0x3a, 0x1d, 0x4d, 0xb0, 0xe4, 0xa0, 0x67, - 0x33, 0x57, 0xf9, 0x93, 0x37, 0xdc, 0x98, 0xb1, 0x97, 0xf9, 0xc7, 0xb0, 0x60, 0xba, 0xd6, 0x11, - 0x71, 0x35, 0x83, 0x3a, 0xfb, 0xd6, 0x40, 0x5e, 0x72, 0xa5, 0xd8, 0x78, 0x93, 0x03, 0x71, 0x51, - 0x08, 0xd4, 0x39, 0xfe, 0x27, 0xb8, 0xc6, 0x4b, 0xcf, 0xa1, 0x18, 0xdd, 0xea, 0xec, 0x5e, 0xf2, - 0xac, 0x5f, 0x26, 0x32, 0xb0, 0xe4, 0x61, 0x28, 0xce, 0x33, 0x8a, 0x08, 0x2b, 0xdf, 0x83, 0xf4, - 0x90, 0x9a, 0x42, 0xcf, 0x42, 0xed, 0x06, 0x8b, 0x26, 0xfe, 0xf9, 0x6c, 0xbd, 0x40, 0xbd, 0xca, - 0x86, 0x65, 0x93, 0x1d, 0x6a, 0x12, 0xcc, 0x01, 0xea, 0x11, 0xa4, 0x99, 0xcf, 0x41, 0x5f, 0x84, - 0x74, 0xad, 0xd5, 0x6e, 0x28, 0x73, 0xa5, 0xe5, 0x93, 0xd3, 0xf2, 0x02, 0x37, 0x09, 0x63, 
0xb0, - 0xbd, 0x8b, 0xd6, 0x21, 0xfb, 0xbc, 0xb3, 0xbd, 0xbb, 0xc3, 0xb6, 0xd7, 0x8d, 0x93, 0xd3, 0xf2, - 0x52, 0xc8, 0x16, 0x46, 0x43, 0x77, 0x20, 0xd3, 0xdf, 0xe9, 0x6e, 0xf4, 0x94, 0x64, 0x09, 0x9d, - 0x9c, 0x96, 0x17, 0x43, 0x3e, 0x1f, 0x73, 0x69, 0x59, 0xae, 0x6a, 0x3e, 0xa4, 0xab, 0x3f, 0x48, - 0x40, 0x21, 0x72, 0xe0, 0xd8, 0xc6, 0x6c, 0x34, 0x37, 0xaa, 0xbb, 0xdb, 0x7d, 0x65, 0x2e, 0xb2, - 0x31, 0x23, 0x90, 0x06, 0xd9, 0xd7, 0xc7, 0x36, 0xf3, 0x73, 0x50, 0xef, 0xb4, 0x7b, 0xad, 0x5e, - 0xbf, 0xd9, 0xee, 0x2b, 0x89, 0xd2, 0xea, 0xc9, 0x69, 0x79, 0x65, 0x16, 0xbc, 0x31, 0xb6, 0x6d, - 0xb6, 0x35, 0xeb, 0xd5, 0xfa, 0x16, 0xdf, 0xeb, 0xd3, 0xad, 0x19, 0x41, 0xd5, 0x75, 0xe3, 0x80, - 0x98, 0xe8, 0x3e, 0xe4, 0x1b, 0xcd, 0xed, 0xe6, 0x66, 0x95, 0x7b, 0xf7, 0xd2, 0x9d, 0x93, 0xd3, - 0xf2, 0xad, 0xd7, 0x7b, 0xb7, 0xc9, 0x40, 0xf7, 0x89, 0x39, 0xb3, 0x45, 0x23, 0x10, 0xf5, 0xbf, - 0x92, 0xb0, 0x80, 0x59, 0x3a, 0xec, 0xfa, 0x5d, 0x6a, 0x5b, 0xc6, 0x04, 0x75, 0x21, 0x6f, 0x50, - 0xc7, 0xb4, 0x22, 0x7e, 0xe2, 0xf1, 0x25, 0x21, 0xd1, 0x54, 0x2a, 0x68, 0xd5, 0x03, 0x49, 0x3c, - 0x55, 0x82, 0x1e, 0x40, 0xc6, 0x24, 0xb6, 0x3e, 0x91, 0xb1, 0xd9, 0xad, 0x8a, 0x48, 0xb8, 0x2b, - 0x41, 0xc2, 0x5d, 0x69, 0xc8, 0x84, 0x1b, 0x0b, 0x1c, 0xcf, 0x41, 0xf4, 0x97, 0x9a, 0xee, 0xfb, - 0x64, 0x38, 0xf2, 0x45, 0x60, 0x96, 0xc6, 0x85, 0xa1, 0xfe, 0xb2, 0x2a, 0x49, 0xe8, 0x11, 0x64, - 0x8f, 0x2d, 0xc7, 0xa4, 0xc7, 0x32, 0xf6, 0xba, 0x42, 0xa9, 0x04, 0xaa, 0x27, 0x2c, 0x24, 0x99, - 0x19, 0x26, 0xdb, 0x43, 0xed, 0x4e, 0xbb, 0x19, 0xec, 0x21, 0xc9, 0xef, 0x38, 0x6d, 0xea, 0xb0, - 0xf3, 0x0f, 0x9d, 0xb6, 0xb6, 0x51, 0x6d, 0x6d, 0xef, 0x62, 0xb6, 0x8f, 0x56, 0x4e, 0x4e, 0xcb, - 0x4a, 0x08, 0xd9, 0xd0, 0x2d, 0x9b, 0x25, 0x03, 0xb7, 0x20, 0x55, 0x6d, 0x7f, 0x47, 0x49, 0x96, - 0x94, 0x93, 0xd3, 0x72, 0x31, 0x64, 0x57, 0x9d, 0xc9, 0xd4, 0xee, 0xb3, 0xfd, 0xaa, 0x7f, 0x97, - 0x82, 0xe2, 0xee, 0xc8, 0xd4, 0x7d, 0x22, 0xce, 0x19, 0x2a, 0x43, 0x61, 0xa4, 0xbb, 0xba, 0x6d, - 0x13, 0xdb, 0xf2, 0x86, 0xb2, 0x94, 0x10, 0x25, 0xa1, 0x0f, 0xde, 0xd4, 0x8c, 0xb5, 0x1c, 0x3b, - 0x3b, 0xdf, 0xfb, 0xd7, 0xf5, 0x44, 0x60, 0xd0, 0x5d, 0x58, 0xdc, 0x17, 0xa3, 0xd5, 0x74, 0x83, - 0x2f, 0x6c, 0x8a, 0x2f, 0x6c, 0x25, 0x6e, 0x61, 0xa3, 0xc3, 0xaa, 0xc8, 0x49, 0x56, 0xb9, 0x14, - 0x5e, 0xd8, 0x8f, 0x36, 0xd1, 0x13, 0x98, 0x1f, 0x52, 0xc7, 0xf2, 0xa9, 0x7b, 0xfd, 0x2a, 0x04, - 0x48, 0x74, 0x0f, 0x96, 0xd9, 0xe2, 0x06, 0xe3, 0xe1, 0x6c, 0x7e, 0x9d, 0x27, 0xf1, 0xd2, 0x50, - 0x7f, 0x29, 0x3b, 0xc4, 0x8c, 0x8c, 0x6a, 0x90, 0xa1, 0x2e, 0x8b, 0x17, 0xb3, 0x7c, 0xb8, 0xf7, - 0xaf, 0x1d, 0xae, 0x68, 0x74, 0x98, 0x0c, 0x16, 0xa2, 0xea, 0xd7, 0x61, 0xe1, 0xc2, 0x24, 0x58, - 0x98, 0xd4, 0xad, 0xee, 0xf6, 0x9a, 0xca, 0x1c, 0x2a, 0x42, 0xae, 0xde, 0x69, 0xf7, 0x5b, 0xed, - 0x5d, 0x16, 0xe7, 0x15, 0x21, 0x87, 0x3b, 0xdb, 0xdb, 0xb5, 0x6a, 0xfd, 0x99, 0x92, 0x54, 0x2b, - 0x50, 0x88, 0x68, 0x43, 0x8b, 0x00, 0xbd, 0x7e, 0xa7, 0xab, 0x6d, 0xb4, 0x70, 0xaf, 0x2f, 0xa2, - 0xc4, 0x5e, 0xbf, 0x8a, 0xfb, 0x92, 0x90, 0x50, 0xff, 0x23, 0x19, 0xac, 0xa8, 0x0c, 0x0c, 0x6b, - 0x17, 0x03, 0xc3, 0x2b, 0x06, 0x2f, 0x43, 0xc3, 0x69, 0x23, 0x0c, 0x10, 0x3f, 0x00, 0xe0, 0x1b, - 0x87, 0x98, 0x9a, 0xee, 0xcb, 0x85, 0x2f, 0xbd, 0x66, 0xe4, 0x7e, 0x50, 0xd1, 0xc2, 0x79, 0x89, - 0xae, 0xfa, 0xe8, 0x1b, 0x50, 0x34, 0xe8, 0x70, 0x64, 0x13, 0x29, 0x9c, 0xba, 0x56, 0xb8, 0x10, - 0xe2, 0xab, 0x7e, 0x34, 0x34, 0x4d, 0x5f, 0x0c, 0x9e, 0x7f, 0x23, 0x11, 0x58, 0x26, 0x26, 0x1a, - 0x2d, 0x42, 0x6e, 0xb7, 0xdb, 0xa8, 0xf6, 0x5b, 0xed, 0x4d, 0x25, 0x81, 0x00, 0xb2, 0xdc, 0xd4, - 0x0d, 0x25, 0xc9, 
0xa2, 0xe8, 0x7a, 0x67, 0xa7, 0xbb, 0xdd, 0xe4, 0x1e, 0x0b, 0xad, 0x80, 0x12, - 0x18, 0x5b, 0xe3, 0x86, 0x6c, 0x36, 0x94, 0x34, 0xba, 0x01, 0x4b, 0x21, 0x55, 0x4a, 0x66, 0xd0, - 0x4d, 0x40, 0x21, 0x71, 0xaa, 0x22, 0xab, 0xfe, 0x2a, 0x2c, 0xd5, 0xa9, 0xe3, 0xeb, 0x96, 0x13, - 0x66, 0x18, 0x8f, 0xd9, 0xa4, 0x25, 0x49, 0xb3, 0x64, 0x25, 0xa8, 0xb6, 0x74, 0x7e, 0xb6, 0x5e, - 0x08, 0xa1, 0xad, 0x06, 0x0f, 0x95, 0x64, 0xc3, 0x64, 0xe7, 0x77, 0x64, 0x99, 0xdc, 0xb8, 0x99, - 0xda, 0xfc, 0xf9, 0xd9, 0x7a, 0xaa, 0xdb, 0x6a, 0x60, 0x46, 0x43, 0x5f, 0x84, 0x3c, 0x79, 0x69, - 0xf9, 0x9a, 0xc1, 0xee, 0x25, 0x66, 0xc0, 0x0c, 0xce, 0x31, 0x42, 0x9d, 0x5d, 0x43, 0x35, 0x80, - 0x2e, 0x75, 0x7d, 0xd9, 0xf3, 0x57, 0x21, 0x33, 0xa2, 0x2e, 0xaf, 0x5d, 0x5c, 0x5a, 0x51, 0x63, - 0x70, 0xb1, 0x51, 0xb1, 0x00, 0xab, 0xbf, 0x97, 0x02, 0xe8, 0xeb, 0xde, 0xa1, 0x54, 0xf2, 0x14, - 0xf2, 0x61, 0x75, 0x52, 0x16, 0x41, 0xae, 0x5c, 0xed, 0x10, 0x8c, 0x9e, 0x04, 0x9b, 0x4d, 0xe4, - 0x4e, 0xb1, 0x49, 0x6c, 0xd0, 0x51, 0x5c, 0xfa, 0x71, 0x31, 0x41, 0x62, 0xd7, 0x3c, 0x71, 0x5d, - 0xb9, 0xf2, 0xec, 0x27, 0xaa, 0xf3, 0x6b, 0x41, 0x18, 0x4d, 0x46, 0xdf, 0xb1, 0x65, 0x9f, 0x99, - 0x15, 0xd9, 0x9a, 0xc3, 0x53, 0x39, 0xf4, 0x31, 0x14, 0xd8, 0xbc, 0x35, 0x8f, 0xf3, 0x64, 0xe0, - 0x7d, 0xa9, 0xa9, 0x84, 0x06, 0x0c, 0xa3, 0xa9, 0x95, 0xef, 0x00, 0xe8, 0xa3, 0x91, 0x6d, 0x11, - 0x53, 0xdb, 0x9b, 0xf0, 0x48, 0x3b, 0x8f, 0xf3, 0x92, 0x52, 0x9b, 0xb0, 0xe3, 0x12, 0xb0, 0x75, - 0x9f, 0x47, 0xcf, 0xd7, 0x18, 0x50, 0xa2, 0xab, 0x7e, 0x4d, 0x81, 0x45, 0x77, 0xec, 0x30, 0x83, - 0xca, 0xd1, 0xa9, 0x7f, 0x9a, 0x84, 0xb7, 0xda, 0xc4, 0x3f, 0xa6, 0xee, 0x61, 0xd5, 0xf7, 0x75, - 0xe3, 0x60, 0x48, 0x1c, 0xb9, 0x7c, 0x91, 0x84, 0x26, 0x71, 0x21, 0xa1, 0x59, 0x85, 0x79, 0xdd, - 0xb6, 0x74, 0x8f, 0x88, 0xe0, 0x2d, 0x8f, 0x83, 0x26, 0x4b, 0xbb, 0x58, 0x12, 0x47, 0x3c, 0x8f, - 0x88, 0xba, 0x0a, 0x1b, 0x78, 0x40, 0x40, 0xdf, 0x85, 0x9b, 0x32, 0x4c, 0xd3, 0xc3, 0xae, 0x58, - 0x42, 0x11, 0x14, 0x68, 0x9b, 0xb1, 0x59, 0x65, 0xfc, 0xe0, 0x64, 0x1c, 0x37, 0x25, 0x77, 0x46, - 0xbe, 0x8c, 0x0a, 0x57, 0xcc, 0x18, 0x56, 0x69, 0x13, 0x6e, 0x5d, 0x2a, 0xf2, 0xb9, 0xea, 0x36, - 0xff, 0x98, 0x04, 0x68, 0x75, 0xab, 0x3b, 0xd2, 0x48, 0x0d, 0xc8, 0xee, 0xeb, 0x43, 0xcb, 0x9e, - 0x5c, 0xe5, 0x01, 0xa7, 0xf8, 0x4a, 0x55, 0x98, 0x63, 0x83, 0xcb, 0x60, 0x29, 0xcb, 0x73, 0xca, - 0xf1, 0x9e, 0x43, 0xfc, 0x30, 0xa7, 0xe4, 0x2d, 0x36, 0x0c, 0x57, 0x77, 0xc2, 0xad, 0x2b, 0x1a, - 0x6c, 0x01, 0x58, 0xc8, 0x73, 0xac, 0x4f, 0x02, 0xb7, 0x25, 0x9b, 0x68, 0x8b, 0x57, 0x47, 0x89, - 0x7b, 0x44, 0xcc, 0xd5, 0x0c, 0x37, 0xea, 0x75, 0xe3, 0xc1, 0x12, 0x2e, 0x6c, 0x17, 0x4a, 0x97, - 0x3e, 0xe2, 0x21, 0xd3, 0x94, 0xf5, 0xb9, 0x6c, 0xf4, 0x10, 0x16, 0x2e, 0xcc, 0xf3, 0xb5, 0x64, - 0xbe, 0xd5, 0x7d, 0xfe, 0x55, 0x25, 0x2d, 0x7f, 0x7d, 0x5d, 0xc9, 0xaa, 0x7f, 0x9b, 0x12, 0x8e, - 0x46, 0x5a, 0x35, 0xfe, 0x55, 0x20, 0xc7, 0x77, 0xb7, 0x41, 0x6d, 0xe9, 0x00, 0xde, 0xbb, 0xda, - 0xff, 0xb0, 0x9c, 0x8e, 0xc3, 0x71, 0x28, 0x88, 0xd6, 0xa1, 0x20, 0x76, 0xb1, 0xc6, 0x0e, 0x1c, - 0x37, 0xeb, 0x02, 0x06, 0x41, 0x62, 0x92, 0xe8, 0x2e, 0x2c, 0xf2, 0xe2, 0x8f, 0x77, 0x40, 0x4c, - 0x81, 0x49, 0x73, 0xcc, 0x42, 0x48, 0xe5, 0xb0, 0x1d, 0x28, 0x4a, 0x82, 0xc6, 0xe3, 0xf9, 0x0c, - 0x1f, 0xd0, 0xbd, 0xeb, 0x06, 0x24, 0x44, 0x78, 0x98, 0x5f, 0x18, 0x4d, 0x1b, 0xea, 0x2f, 0x42, - 0x2e, 0x18, 0x2c, 0x5a, 0x85, 0x54, 0xbf, 0xde, 0x55, 0xe6, 0x4a, 0x4b, 0x27, 0xa7, 0xe5, 0x42, - 0x40, 0xee, 0xd7, 0xbb, 0x8c, 0xb3, 0xdb, 0xe8, 0x2a, 0x89, 0x8b, 0x9c, 0xdd, 0x46, 0x17, 0x95, - 0x20, 0xdd, 0xab, 0xf7, 0xbb, 0x41, 0x7c, 
0x16, 0xb0, 0x18, 0xad, 0x94, 0x66, 0xf1, 0x99, 0xba, - 0x0f, 0x85, 0x48, 0xef, 0xe8, 0x1d, 0x98, 0x6f, 0xb5, 0x37, 0x71, 0xb3, 0xd7, 0x53, 0xe6, 0x4a, - 0x37, 0x4f, 0x4e, 0xcb, 0x28, 0xc2, 0x6d, 0x39, 0x03, 0xb6, 0x76, 0xe8, 0x0e, 0xa4, 0xb7, 0x3a, - 0xec, 0xde, 0x17, 0xc9, 0x45, 0x04, 0xb1, 0x45, 0x3d, 0xbf, 0x74, 0x43, 0x06, 0x7e, 0x51, 0xc5, - 0xea, 0xef, 0x27, 0x20, 0x2b, 0x0e, 0x5a, 0xec, 0x22, 0x56, 0x61, 0x3e, 0x28, 0x21, 0x88, 0xc4, - 0xef, 0xbd, 0xcb, 0x93, 0xb4, 0x8a, 0xcc, 0xa9, 0xc4, 0xd6, 0x0c, 0xe4, 0x4a, 0x1f, 0x42, 0x31, - 0xca, 0xf8, 0x5c, 0x1b, 0xf3, 0xbb, 0x50, 0x60, 0x7b, 0x3f, 0x48, 0xd6, 0x1e, 0x43, 0x56, 0x38, - 0x8b, 0xf0, 0x1e, 0xba, 0x3c, 0x63, 0x94, 0x48, 0xf4, 0x14, 0xe6, 0x45, 0x96, 0x19, 0x54, 0x8e, - 0xd7, 0xae, 0x3e, 0x61, 0x38, 0x80, 0xab, 0x1f, 0x43, 0xba, 0x4b, 0x88, 0xcb, 0x6c, 0xef, 0x50, - 0x93, 0x4c, 0xaf, 0x6e, 0x99, 0x20, 0x9b, 0xa4, 0xd5, 0x60, 0x09, 0xb2, 0x49, 0x5a, 0x66, 0x58, - 0x1b, 0x4b, 0x46, 0x6a, 0x63, 0x7d, 0x28, 0xbe, 0x20, 0xd6, 0xe0, 0xc0, 0x27, 0x26, 0x57, 0x74, - 0x1f, 0xd2, 0x23, 0x12, 0x0e, 0x7e, 0x35, 0x76, 0xf3, 0x11, 0xe2, 0x62, 0x8e, 0x62, 0x3e, 0xe6, - 0x98, 0x4b, 0xcb, 0xe7, 0x0e, 0xd9, 0x52, 0xff, 0x21, 0x09, 0x8b, 0x2d, 0xcf, 0x1b, 0xeb, 0x8e, - 0x11, 0x44, 0x75, 0xdf, 0xbc, 0x18, 0xd5, 0xc5, 0xbe, 0x0b, 0x5d, 0x14, 0xb9, 0x58, 0xf2, 0x93, - 0x37, 0x6b, 0x32, 0xbc, 0x59, 0xd5, 0x1f, 0x27, 0x82, 0xba, 0xde, 0xdd, 0x88, 0x2b, 0x10, 0x39, - 0x62, 0x54, 0x13, 0xd9, 0x75, 0x0e, 0x1d, 0x7a, 0xec, 0xa0, 0xb7, 0x21, 0x83, 0x9b, 0xed, 0xe6, - 0x0b, 0x25, 0x21, 0xb6, 0xe7, 0x05, 0x10, 0x26, 0x0e, 0x39, 0x66, 0x9a, 0xba, 0xcd, 0x76, 0x83, - 0x45, 0x61, 0xc9, 0x18, 0x4d, 0x5d, 0xe2, 0x98, 0x96, 0x33, 0x40, 0xef, 0x40, 0xb6, 0xd5, 0xeb, - 0xed, 0xf2, 0x14, 0xf2, 0xad, 0x93, 0xd3, 0xf2, 0x8d, 0x0b, 0x28, 0x5e, 0xd3, 0x35, 0x19, 0x88, - 0xa5, 0x40, 0x2c, 0x3e, 0x8b, 0x01, 0xb1, 0xd8, 0x5a, 0x80, 0x70, 0xa7, 0x5f, 0xed, 0x37, 0x95, - 0x4c, 0x0c, 0x08, 0x53, 0xf6, 0x57, 0x1e, 0xb7, 0x7f, 0x49, 0x82, 0x52, 0x35, 0x0c, 0x32, 0xf2, - 0x19, 0x5f, 0x66, 0x9d, 0x7d, 0xc8, 0x8d, 0xd8, 0x2f, 0x8b, 0x04, 0x11, 0xd4, 0xd3, 0xd8, 0x97, - 0xcd, 0x19, 0xb9, 0x0a, 0xa6, 0x36, 0xa9, 0x9a, 0x43, 0xcb, 0xf3, 0x2c, 0xea, 0x08, 0x1a, 0x0e, - 0x35, 0x95, 0xfe, 0x33, 0x01, 0x37, 0x62, 0x10, 0xe8, 0x21, 0xa4, 0x5d, 0x6a, 0x07, 0x6b, 0x78, - 0xfb, 0xb2, 0x92, 0x2d, 0x13, 0xc5, 0x1c, 0x89, 0xd6, 0x00, 0xf4, 0xb1, 0x4f, 0x75, 0xde, 0x3f, - 0x5f, 0xbd, 0x1c, 0x8e, 0x50, 0xd0, 0x0b, 0xc8, 0x7a, 0xc4, 0x70, 0x49, 0x10, 0x67, 0x7f, 0xfc, - 0xff, 0x1d, 0x7d, 0xa5, 0xc7, 0xd5, 0x60, 0xa9, 0xae, 0x54, 0x81, 0xac, 0xa0, 0xb0, 0x6d, 0x6f, - 0xea, 0xbe, 0x2e, 0x0b, 0xfa, 0xfc, 0x37, 0xdb, 0x4d, 0xba, 0x3d, 0x08, 0x76, 0x93, 0x6e, 0x0f, - 0xd4, 0xbf, 0x49, 0x02, 0x34, 0x5f, 0xfa, 0xc4, 0x75, 0x74, 0xbb, 0x5e, 0x45, 0xcd, 0xc8, 0xcd, - 0x20, 0x66, 0xfb, 0xe5, 0xd8, 0x57, 0x8a, 0x50, 0xa2, 0x52, 0xaf, 0xc6, 0xdc, 0x0d, 0xb7, 0x20, - 0x35, 0x76, 0xe5, 0x63, 0xb5, 0x88, 0x91, 0x77, 0xf1, 0x36, 0x66, 0x34, 0xd4, 0x9c, 0xba, 0xad, - 0xd4, 0xe5, 0x4f, 0xd2, 0x91, 0x0e, 0x62, 0x5d, 0x17, 0x3b, 0xf9, 0x86, 0xae, 0x19, 0x44, 0xde, - 0x2a, 0x45, 0x71, 0xf2, 0xeb, 0xd5, 0x3a, 0x71, 0x7d, 0x9c, 0x35, 0x74, 0xf6, 0xff, 0x27, 0xf2, - 0x6f, 0xf7, 0x01, 0xa6, 0x53, 0x43, 0x6b, 0x90, 0xa9, 0x6f, 0xf4, 0x7a, 0xdb, 0xca, 0x9c, 0x70, - 0xe0, 0x53, 0x16, 0x27, 0xab, 0x7f, 0x99, 0x84, 0x5c, 0xbd, 0x2a, 0xaf, 0xdc, 0x3a, 0x28, 0xdc, - 0x2b, 0xf1, 0x67, 0x10, 0xf2, 0x72, 0x64, 0xb9, 0x13, 0xe9, 0x58, 0xae, 0x48, 0x78, 0x17, 0x99, - 0x08, 0x1b, 0x75, 0x93, 0x0b, 0x20, 0x0c, 0x45, 0x22, 0x8d, 0xa0, 
0x19, 0x7a, 0xe0, 0xe3, 0xd7, - 0xae, 0x36, 0x96, 0x48, 0x5d, 0xa6, 0x6d, 0x0f, 0x17, 0x02, 0x25, 0x75, 0xdd, 0x43, 0x1f, 0xc0, - 0x92, 0x67, 0x0d, 0x1c, 0xcb, 0x19, 0x68, 0x81, 0xf1, 0xf8, 0x9b, 0x4c, 0x6d, 0xf9, 0xfc, 0x6c, - 0x7d, 0xa1, 0x27, 0x58, 0xd2, 0x86, 0x0b, 0x12, 0x59, 0xe7, 0xa6, 0x44, 0x5f, 0x87, 0xc5, 0x88, - 0x28, 0xb3, 0xa2, 0x30, 0xbb, 0x72, 0x7e, 0xb6, 0x5e, 0x0c, 0x25, 0x9f, 0x91, 0x09, 0x2e, 0x86, - 0x82, 0xcf, 0x08, 0xaf, 0xcd, 0xec, 0x53, 0xd7, 0x20, 0x9a, 0xcb, 0xcf, 0x34, 0xbf, 0xdd, 0xd3, - 0xb8, 0xc0, 0x69, 0xe2, 0x98, 0xab, 0xcf, 0xe1, 0x46, 0xc7, 0x35, 0x0e, 0x88, 0xe7, 0x0b, 0x53, - 0x48, 0x2b, 0x7e, 0x0c, 0xb7, 0x7d, 0xdd, 0x3b, 0xd4, 0x0e, 0x2c, 0xcf, 0xa7, 0xee, 0x44, 0x73, - 0x89, 0x4f, 0x1c, 0xc6, 0xd7, 0xf8, 0x43, 0xae, 0x2c, 0x08, 0xde, 0x62, 0x98, 0x2d, 0x01, 0xc1, - 0x01, 0x62, 0x9b, 0x01, 0xd4, 0x16, 0x14, 0x59, 0x0a, 0x23, 0x8b, 0x6a, 0x6c, 0xf6, 0x60, 0xd3, - 0x81, 0xf6, 0xc6, 0xd7, 0x54, 0xde, 0xa6, 0x03, 0xf1, 0x53, 0xfd, 0x36, 0x28, 0x0d, 0xcb, 0x1b, - 0xe9, 0xbe, 0x71, 0x10, 0x54, 0x3a, 0x51, 0x03, 0x94, 0x03, 0xa2, 0xbb, 0xfe, 0x1e, 0xd1, 0x7d, - 0x6d, 0x44, 0x5c, 0x8b, 0x9a, 0xd7, 0xaf, 0xf2, 0x52, 0x28, 0xd2, 0xe5, 0x12, 0xea, 0x7f, 0x27, - 0x00, 0xb0, 0xbe, 0x1f, 0x44, 0x6b, 0x5f, 0x81, 0x65, 0xcf, 0xd1, 0x47, 0xde, 0x01, 0xf5, 0x35, - 0xcb, 0xf1, 0x89, 0x7b, 0xa4, 0xdb, 0xb2, 0xb8, 0xa3, 0x04, 0x8c, 0x96, 0xa4, 0xa3, 0xfb, 0x80, - 0x0e, 0x09, 0x19, 0x69, 0xd4, 0x36, 0xb5, 0x80, 0x29, 0x9e, 0x99, 0xd3, 0x58, 0x61, 0x9c, 0x8e, - 0x6d, 0xf6, 0x02, 0x3a, 0xaa, 0xc1, 0x1a, 0x9b, 0x3e, 0x71, 0x7c, 0xd7, 0x22, 0x9e, 0xb6, 0x4f, - 0x5d, 0xcd, 0xb3, 0xe9, 0xb1, 0xb6, 0x4f, 0x6d, 0x9b, 0x1e, 0x13, 0x37, 0xa8, 0x9b, 0x95, 0x6c, - 0x3a, 0x68, 0x0a, 0xd0, 0x06, 0x75, 0x7b, 0x36, 0x3d, 0xde, 0x08, 0x10, 0x2c, 0xa4, 0x9b, 0xce, - 0xd9, 0xb7, 0x8c, 0xc3, 0x20, 0xa4, 0x0b, 0xa9, 0x7d, 0xcb, 0x38, 0x44, 0xef, 0xc0, 0x02, 0xb1, - 0x09, 0x2f, 0x9f, 0x08, 0x54, 0x86, 0xa3, 0x8a, 0x01, 0x91, 0x81, 0xd4, 0x4f, 0x40, 0x69, 0x3a, - 0x86, 0x3b, 0x19, 0x45, 0xd6, 0xfc, 0x3e, 0x20, 0xe6, 0x24, 0x35, 0x9b, 0x1a, 0x87, 0xda, 0x50, - 0x77, 0xf4, 0x01, 0x1b, 0x97, 0x78, 0xfd, 0x53, 0x18, 0x67, 0x9b, 0x1a, 0x87, 0x3b, 0x92, 0xae, - 0x7e, 0x00, 0xd0, 0x1b, 0xb9, 0x44, 0x37, 0x3b, 0x2c, 0x9a, 0x60, 0xa6, 0xe3, 0x2d, 0xcd, 0x94, - 0xaf, 0xa7, 0xd4, 0x95, 0x47, 0x5d, 0x11, 0x8c, 0x46, 0x48, 0x57, 0x7f, 0x1e, 0x6e, 0x74, 0x6d, - 0xdd, 0xe0, 0x5f, 0x12, 0x74, 0xc3, 0xe7, 0x2c, 0xf4, 0x14, 0xb2, 0x02, 0x2a, 0x57, 0x32, 0xf6, - 0xb8, 0x4d, 0xfb, 0xdc, 0x9a, 0xc3, 0x12, 0x5f, 0x2b, 0x02, 0x4c, 0xf5, 0xa8, 0x7f, 0x9e, 0x80, - 0x7c, 0xa8, 0x1f, 0x95, 0xc5, 0x2b, 0x8d, 0xef, 0xea, 0x96, 0x23, 0x33, 0xfe, 0x3c, 0x8e, 0x92, - 0x50, 0x0b, 0x0a, 0xa3, 0x50, 0xfa, 0xca, 0x78, 0x2e, 0x66, 0xd4, 0x38, 0x2a, 0x8b, 0x3e, 0x84, - 0x7c, 0xf0, 0x5c, 0x1d, 0x78, 0xd8, 0xab, 0x5f, 0xb7, 0xa7, 0x70, 0xf5, 0x9b, 0x00, 0xdf, 0xa2, - 0x96, 0xd3, 0xa7, 0x87, 0xc4, 0xe1, 0xcf, 0xaf, 0x2c, 0x5f, 0x24, 0x81, 0x15, 0x65, 0x8b, 0x97, - 0x01, 0xc4, 0x12, 0x84, 0xaf, 0x90, 0xa2, 0xa9, 0xfe, 0x75, 0x12, 0xb2, 0x98, 0x52, 0xbf, 0x5e, - 0x45, 0x65, 0xc8, 0x4a, 0x3f, 0xc1, 0xef, 0x9f, 0x5a, 0xfe, 0xfc, 0x6c, 0x3d, 0x23, 0x1c, 0x44, - 0xc6, 0xe0, 0x9e, 0x21, 0xe2, 0xc1, 0x93, 0x97, 0x79, 0x70, 0xf4, 0x10, 0x8a, 0x12, 0xa4, 0x1d, - 0xe8, 0xde, 0x81, 0x48, 0xde, 0x6a, 0x8b, 0xe7, 0x67, 0xeb, 0x20, 0x90, 0x5b, 0xba, 0x77, 0x80, - 0x41, 0xa0, 0xd9, 0x6f, 0xd4, 0x84, 0xc2, 0xa7, 0xd4, 0x72, 0x34, 0x9f, 0x4f, 0x42, 0x16, 0x1a, - 0x63, 0xd7, 0x71, 0x3a, 0x55, 0xf9, 0x2d, 0x02, 0x7c, 0x3a, 0x9d, 0x7c, 0x13, 0x16, 0x5c, 
0x4a, - 0x7d, 0xe1, 0xb6, 0x2c, 0xea, 0xc8, 0x1a, 0x46, 0x39, 0xb6, 0xb4, 0x4d, 0xa9, 0x8f, 0x25, 0x0e, - 0x17, 0xdd, 0x48, 0x0b, 0x3d, 0x84, 0x15, 0x5b, 0xf7, 0x7c, 0x8d, 0xfb, 0x3b, 0x73, 0xaa, 0x2d, - 0xcb, 0x8f, 0x1a, 0x62, 0xbc, 0x0d, 0xce, 0x0a, 0x24, 0xd4, 0x7f, 0x4a, 0x40, 0x81, 0x4d, 0xc6, - 0xda, 0xb7, 0x0c, 0x16, 0xe4, 0x7d, 0xfe, 0xd8, 0xe3, 0x16, 0xa4, 0x0c, 0xcf, 0x95, 0x46, 0xe5, - 0x97, 0x6f, 0xbd, 0x87, 0x31, 0xa3, 0xa1, 0x4f, 0x20, 0x2b, 0x6b, 0x29, 0x22, 0xec, 0x50, 0xaf, - 0x0f, 0x47, 0xa5, 0x6d, 0xa4, 0x1c, 0xdf, 0xcb, 0xd3, 0xd1, 0x89, 0x4b, 0x00, 0x47, 0x49, 0xe8, - 0x26, 0x24, 0x0d, 0x61, 0x2e, 0xf9, 0xb1, 0x4b, 0xbd, 0x8d, 0x93, 0x86, 0xa3, 0xfe, 0x20, 0x01, - 0x0b, 0xd3, 0x03, 0xcf, 0x76, 0xc0, 0x6d, 0xc8, 0x7b, 0xe3, 0x3d, 0x6f, 0xe2, 0xf9, 0x64, 0x18, - 0x3c, 0x2d, 0x87, 0x04, 0xd4, 0x82, 0xbc, 0x6e, 0x0f, 0xa8, 0x6b, 0xf9, 0x07, 0x43, 0x99, 0xa5, - 0xc6, 0x87, 0x0a, 0x51, 0x9d, 0x95, 0x6a, 0x20, 0x82, 0xa7, 0xd2, 0xc1, 0xbd, 0x2f, 0xbe, 0x3f, - 0xe0, 0xf7, 0xfe, 0xdb, 0x50, 0xb4, 0xf5, 0x21, 0x2f, 0x2e, 0xf9, 0xd6, 0x50, 0xcc, 0x23, 0x8d, - 0x0b, 0x92, 0xd6, 0xb7, 0x86, 0x44, 0x55, 0x21, 0x1f, 0x2a, 0x43, 0x4b, 0x50, 0xa8, 0x36, 0x7b, - 0xda, 0xa3, 0xc7, 0x4f, 0xb5, 0xcd, 0xfa, 0x8e, 0x32, 0x27, 0x63, 0xd3, 0xbf, 0x48, 0xc0, 0x82, - 0x74, 0x47, 0x32, 0xde, 0x7f, 0x07, 0xe6, 0x5d, 0x7d, 0xdf, 0x0f, 0x32, 0x92, 0xb4, 0xd8, 0xd5, - 0xcc, 0xc3, 0xb3, 0x8c, 0x84, 0xb1, 0xe2, 0x33, 0x92, 0xc8, 0xc7, 0x0e, 0xa9, 0x2b, 0x3f, 0x76, - 0x48, 0xff, 0x54, 0x3e, 0x76, 0x50, 0x7f, 0x0d, 0x60, 0xc3, 0xb2, 0x49, 0x5f, 0xd4, 0xa1, 0xe2, - 0xf2, 0x4b, 0x16, 0xc3, 0xc9, 0x3a, 0x67, 0x10, 0xc3, 0xb5, 0x1a, 0x98, 0xd1, 0x18, 0x6b, 0x60, - 0x99, 0xf2, 0x30, 0x72, 0xd6, 0x26, 0x63, 0x0d, 0x2c, 0x33, 0x7c, 0x95, 0x4b, 0x5f, 0xf7, 0x2a, - 0x77, 0x9a, 0x80, 0x25, 0x19, 0xbb, 0x86, 0xee, 0xf7, 0xcb, 0x90, 0x17, 0x61, 0xec, 0x34, 0xa1, - 0xe3, 0x0f, 0xfc, 0x02, 0xd7, 0x6a, 0xe0, 0x9c, 0x60, 0xb7, 0x4c, 0xb4, 0x0e, 0x05, 0x09, 0x8d, - 0x7c, 0x18, 0x05, 0x82, 0xd4, 0x66, 0xc3, 0xff, 0x2a, 0xa4, 0xf7, 0x2d, 0x9b, 0xc8, 0x8d, 0x1e, - 0xeb, 0x00, 0xa6, 0x06, 0xd8, 0x9a, 0xc3, 0x1c, 0x5d, 0xcb, 0x05, 0x85, 0x3a, 0x3e, 0x3e, 0x99, - 0x76, 0x46, 0xc7, 0x27, 0x32, 0xd0, 0x99, 0xf1, 0x09, 0x1c, 0x1b, 0x9f, 0x60, 0x8b, 0xf1, 0x49, - 0x68, 0x74, 0x7c, 0x82, 0xf4, 0x53, 0x19, 0xdf, 0x36, 0xdc, 0xac, 0xd9, 0xba, 0x71, 0x68, 0x5b, - 0x9e, 0x4f, 0xcc, 0xa8, 0xc7, 0x78, 0x0c, 0xd9, 0x0b, 0x41, 0xe7, 0x55, 0x15, 0x4d, 0x89, 0x54, - 0xff, 0x3d, 0x01, 0xc5, 0x2d, 0xa2, 0xdb, 0xfe, 0xc1, 0xb4, 0x6c, 0xe4, 0x13, 0xcf, 0x97, 0x97, - 0x15, 0xff, 0x8d, 0xbe, 0x06, 0xb9, 0x30, 0x26, 0xb9, 0xf6, 0x6d, 0x2e, 0x84, 0xa2, 0x27, 0x30, - 0xcf, 0xce, 0x18, 0x1d, 0x07, 0xc9, 0xce, 0x55, 0xcf, 0x3e, 0x12, 0xc9, 0x2e, 0x19, 0x97, 0xf0, - 0x20, 0x84, 0x6f, 0xa5, 0x0c, 0x0e, 0x9a, 0xe8, 0x67, 0xa1, 0xc8, 0x5f, 0x2d, 0x82, 0x98, 0x2b, - 0x73, 0x9d, 0xce, 0x82, 0x78, 0x78, 0x14, 0xf1, 0xd6, 0xff, 0x26, 0x60, 0x65, 0x47, 0x9f, 0xec, - 0x11, 0xe9, 0x36, 0x88, 0x89, 0x89, 0x41, 0x5d, 0x13, 0x75, 0xa3, 0xee, 0xe6, 0x8a, 0x77, 0xcc, - 0x38, 0xe1, 0x78, 0xaf, 0x13, 0x24, 0x60, 0xc9, 0x48, 0x02, 0xb6, 0x02, 0x19, 0x87, 0x3a, 0x06, - 0x91, 0xbe, 0x48, 0x34, 0x54, 0x2b, 0xea, 0x6a, 0x4a, 0xe1, 0x13, 0x23, 0x2f, 0x40, 0xb5, 0xa9, - 0x1f, 0xf6, 0x86, 0x3e, 0x81, 0x52, 0xaf, 0x59, 0xc7, 0xcd, 0x7e, 0xad, 0xf3, 0x6d, 0xad, 0x57, - 0xdd, 0xee, 0x55, 0x1f, 0x3f, 0xd4, 0xba, 0x9d, 0xed, 0xef, 0x3c, 0x7a, 0xf2, 0xf0, 0x6b, 0x4a, - 0xa2, 0x54, 0x3e, 0x39, 0x2d, 0xdf, 0x6e, 0x57, 0xeb, 0xdb, 0xe2, 0xc4, 0xec, 0xd1, 0x97, 0x3d, - 0xdd, 0xf6, 0xf4, 
0xc7, 0x0f, 0xbb, 0xd4, 0x9e, 0x30, 0x0c, 0xdb, 0xd6, 0xc5, 0xe8, 0x7d, 0x15, - 0xbd, 0x86, 0x13, 0x97, 0x5e, 0xc3, 0xd3, 0xdb, 0x3c, 0x79, 0xc9, 0x6d, 0xbe, 0x01, 0x2b, 0x86, - 0x4b, 0x3d, 0x4f, 0x63, 0xd1, 0x3f, 0x31, 0x67, 0xf2, 0x8b, 0x2f, 0x9c, 0x9f, 0xad, 0x2f, 0xd7, - 0x19, 0xbf, 0xc7, 0xd9, 0x52, 0xfd, 0xb2, 0x11, 0x21, 0xf1, 0x9e, 0xd4, 0x3f, 0x48, 0xb1, 0x40, - 0xca, 0x3a, 0xb2, 0x6c, 0x32, 0x20, 0x1e, 0x7a, 0x0e, 0x4b, 0x86, 0x4b, 0x4c, 0x16, 0xd6, 0xeb, - 0x76, 0xf4, 0x03, 0xdb, 0x9f, 0x89, 0x8d, 0x69, 0x42, 0xc1, 0x4a, 0x3d, 0x94, 0xea, 0x8d, 0x88, - 0x81, 0x17, 0x8d, 0x0b, 0x6d, 0xf4, 0x29, 0x2c, 0x79, 0xc4, 0xb6, 0x9c, 0xf1, 0x4b, 0xcd, 0xa0, - 0x8e, 0x4f, 0x5e, 0x06, 0xaf, 0x65, 0xd7, 0xe9, 0xed, 0x35, 0xb7, 0x99, 0x54, 0x5d, 0x08, 0xd5, - 0xd0, 0xf9, 0xd9, 0xfa, 0xe2, 0x45, 0x1a, 0x5e, 0x94, 0x9a, 0x65, 0xbb, 0xd4, 0x86, 0xc5, 0x8b, - 0xa3, 0x41, 0x2b, 0xf2, 0xec, 0x73, 0x17, 0x12, 0x9c, 0x6d, 0x74, 0x1b, 0x72, 0x2e, 0x19, 0x58, - 0x9e, 0xef, 0x0a, 0x33, 0x33, 0x4e, 0x48, 0x61, 0x27, 0x5f, 0x7c, 0x1d, 0x55, 0xfa, 0x15, 0x98, - 0xe9, 0x91, 0x1d, 0x16, 0xd3, 0xf2, 0xf4, 0x3d, 0xa9, 0x32, 0x87, 0x83, 0x26, 0xdb, 0x83, 0x63, - 0x2f, 0x0c, 0xd4, 0xf8, 0x6f, 0x46, 0xe3, 0x11, 0x85, 0xfc, 0x56, 0x8c, 0xc7, 0x0c, 0xc1, 0x47, - 0xa7, 0xe9, 0xc8, 0x47, 0xa7, 0x2b, 0x90, 0xb1, 0xc9, 0x11, 0xb1, 0xc5, 0x5d, 0x8e, 0x45, 0xe3, - 0xde, 0x43, 0x28, 0x06, 0x5f, 0x37, 0xf2, 0xaf, 0x2a, 0x72, 0x90, 0xee, 0x57, 0x7b, 0xcf, 0x94, - 0x39, 0x04, 0x90, 0x15, 0x9b, 0x53, 0xbc, 0xe4, 0xd5, 0x3b, 0xed, 0x8d, 0xd6, 0xa6, 0x92, 0xbc, - 0xf7, 0xbb, 0x69, 0xc8, 0x87, 0x6f, 0x49, 0xec, 0xee, 0x68, 0x37, 0x5f, 0x04, 0xbb, 0x3b, 0xa4, - 0xb7, 0xc9, 0x31, 0x7a, 0x7b, 0x5a, 0x85, 0xfa, 0x44, 0x3c, 0x9e, 0x87, 0xec, 0xa0, 0x02, 0xf5, - 0x2e, 0xe4, 0xaa, 0xbd, 0x5e, 0x6b, 0xb3, 0xdd, 0x6c, 0x28, 0x9f, 0x25, 0x4a, 0x5f, 0x38, 0x39, - 0x2d, 0x2f, 0x87, 0xa0, 0xaa, 0x27, 0x36, 0x1f, 0x47, 0xd5, 0xeb, 0xcd, 0x6e, 0xbf, 0xd9, 0x50, - 0x5e, 0x25, 0x67, 0x51, 0xbc, 0xaa, 0xc2, 0x3f, 0xeb, 0xc9, 0x77, 0x71, 0xb3, 0x5b, 0xc5, 0xac, - 0xc3, 0xcf, 0x92, 0xa2, 0x38, 0x36, 0xed, 0xd1, 0x25, 0x23, 0xdd, 0x65, 0x7d, 0xae, 0x05, 0xdf, - 0xc9, 0xbd, 0x4a, 0x89, 0x4f, 0x3f, 0xa6, 0x0f, 0x63, 0x44, 0x37, 0x27, 0xac, 0x37, 0xfe, 0x22, - 0xc9, 0xd5, 0xa4, 0x66, 0x7a, 0xeb, 0x31, 0xdf, 0xc3, 0xb4, 0xa8, 0x30, 0x8f, 0x77, 0xdb, 0x6d, - 0x06, 0x7a, 0x95, 0x9e, 0x99, 0x1d, 0x1e, 0x3b, 0x2c, 0x63, 0x46, 0x77, 0x21, 0x17, 0x3c, 0x58, - 0x2a, 0x9f, 0xa5, 0x67, 0x06, 0x54, 0x0f, 0x5e, 0x5b, 0x79, 0x87, 0x5b, 0xbb, 0x7d, 0xfe, 0x19, - 0xdf, 0xab, 0xcc, 0x6c, 0x87, 0x07, 0x63, 0xdf, 0xa4, 0xc7, 0x0e, 0x3b, 0xb3, 0xb2, 0x0e, 0xf7, - 0x59, 0x46, 0x14, 0x2d, 0x42, 0x8c, 0x2c, 0xc2, 0xbd, 0x0b, 0x39, 0xdc, 0xfc, 0x96, 0xf8, 0xe2, - 0xef, 0x55, 0x76, 0x46, 0x0f, 0x26, 0x9f, 0x12, 0x83, 0xf5, 0x56, 0x86, 0x2c, 0x6e, 0xee, 0x74, - 0x9e, 0x37, 0x95, 0x3f, 0xcc, 0xce, 0xe8, 0xc1, 0x64, 0x48, 0xf9, 0x77, 0x4f, 0xb9, 0x0e, 0xee, - 0x6e, 0x55, 0xf9, 0xa2, 0xcc, 0xea, 0xe9, 0xb8, 0xa3, 0x03, 0xdd, 0x21, 0xe6, 0xf4, 0x0b, 0x99, - 0x90, 0x75, 0xef, 0x17, 0x20, 0x17, 0xc4, 0xae, 0x68, 0x0d, 0xb2, 0x2f, 0x3a, 0xf8, 0x59, 0x13, - 0x2b, 0x73, 0xc2, 0xca, 0x01, 0xe7, 0x85, 0xc8, 0x3a, 0xca, 0x30, 0xbf, 0x53, 0x6d, 0x57, 0x37, - 0x9b, 0x38, 0x28, 0xa2, 0x07, 0x00, 0x19, 0x80, 0x95, 0x14, 0xd9, 0x41, 0xa8, 0xb3, 0xb6, 0xfa, - 0xfd, 0x1f, 0xad, 0xcd, 0xfd, 0xf0, 0x47, 0x6b, 0x73, 0xaf, 0xce, 0xd7, 0x12, 0xdf, 0x3f, 0x5f, - 0x4b, 0xfc, 0xfd, 0xf9, 0x5a, 0xe2, 0xdf, 0xce, 0xd7, 0x12, 0x7b, 0x59, 0x7e, 0x4d, 0x3c, 0xf9, - 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6b, 
0x9b, 0x2e, 0x8d, 0x2e, 0x32, 0x00, 0x00, + // 5116 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x7a, 0x5d, 0x6c, 0x23, 0x59, + 0x56, 0x7f, 0xec, 0xd8, 0x8e, 0x7d, 0xec, 0x24, 0xd5, 0xb7, 0xb3, 0x3d, 0x69, 0x6f, 0x4f, 0xe2, + 0xa9, 0x99, 0xde, 0x99, 0xed, 0x9d, 0xbf, 0xfb, 0x6b, 0x77, 0xd5, 0x33, 0xf3, 0xdf, 0x9d, 0xb1, + 0xcb, 0x95, 0x8e, 0xb7, 0xd3, 0xb6, 0x75, 0xed, 0x74, 0xef, 0x22, 0x41, 0x51, 0xa9, 0xba, 0x71, + 0x6a, 0x52, 0xae, 0x6b, 0xaa, 0xca, 0xe9, 0x36, 0x0b, 0x62, 0xc4, 0x03, 0xa0, 0x3c, 0xc1, 0x0b, + 0x2c, 0x42, 0x41, 0x48, 0xf0, 0xc6, 0x03, 0x0f, 0x20, 0x21, 0x78, 0x1a, 0x24, 0x84, 0x56, 0xbc, + 0xc0, 0x82, 0x84, 0x56, 0x20, 0x05, 0x36, 0x0f, 0xbc, 0xad, 0xe0, 0x05, 0xf1, 0xc2, 0x03, 0xba, + 0x1f, 0x55, 0xae, 0xb8, 0x2b, 0xc9, 0x0c, 0xbb, 0x2f, 0x89, 0xef, 0x39, 0xbf, 0x73, 0xee, 0xbd, + 0xe7, 0xde, 0x7b, 0xee, 0x39, 0xe7, 0x16, 0xdc, 0x19, 0x3a, 0xe1, 0xc1, 0x64, 0xaf, 0x6e, 0xd1, + 0xd1, 0x5d, 0x9b, 0x5a, 0x87, 0xc4, 0xbf, 0x1b, 0xbc, 0x30, 0xfd, 0xd1, 0xa1, 0x13, 0xde, 0x35, + 0xc7, 0xce, 0xdd, 0x70, 0x3a, 0x26, 0x41, 0x7d, 0xec, 0xd3, 0x90, 0x22, 0x24, 0x00, 0xf5, 0x08, + 0x50, 0x3f, 0xba, 0x5f, 0xdd, 0x1c, 0x52, 0x3a, 0x74, 0xc9, 0x5d, 0x8e, 0xd8, 0x9b, 0xec, 0xdf, + 0x0d, 0x9d, 0x11, 0x09, 0x42, 0x73, 0x34, 0x16, 0x42, 0xd5, 0x8d, 0x79, 0x80, 0x3d, 0xf1, 0xcd, + 0xd0, 0xa1, 0x9e, 0xe4, 0xaf, 0x0d, 0xe9, 0x90, 0xf2, 0x9f, 0x77, 0xd9, 0x2f, 0x41, 0x55, 0x37, + 0x61, 0xe9, 0x19, 0xf1, 0x03, 0x87, 0x7a, 0x68, 0x0d, 0xf2, 0x8e, 0x67, 0x93, 0x97, 0xeb, 0x99, + 0x5a, 0xe6, 0x9d, 0x1c, 0x16, 0x0d, 0xf5, 0x1e, 0x40, 0x9b, 0xfd, 0xd0, 0xbd, 0xd0, 0x9f, 0x22, + 0x05, 0x16, 0x0f, 0xc9, 0x94, 0x23, 0x4a, 0x98, 0xfd, 0x64, 0x94, 0x23, 0xd3, 0x5d, 0xcf, 0x0a, + 0xca, 0x91, 0xe9, 0xaa, 0x3f, 0xca, 0x40, 0xb9, 0xe1, 0x79, 0x34, 0xe4, 0xbd, 0x07, 0x08, 0x41, + 0xce, 0x33, 0x47, 0x44, 0x0a, 0xf1, 0xdf, 0x48, 0x83, 0x82, 0x6b, 0xee, 0x11, 0x37, 0x58, 0xcf, + 0xd6, 0x16, 0xdf, 0x29, 0x3f, 0xf8, 0x4a, 0xfd, 0xd5, 0x29, 0xd7, 0x13, 0x4a, 0xea, 0x3b, 0x1c, + 0xcd, 0x07, 0x81, 0xa5, 0x28, 0xfa, 0x26, 0x2c, 0x39, 0x9e, 0xed, 0x58, 0x24, 0x58, 0xcf, 0x71, + 0x2d, 0x1b, 0x69, 0x5a, 0x66, 0xa3, 0x6f, 0xe6, 0xbe, 0x7f, 0xba, 0xb9, 0x80, 0x23, 0xa1, 0xea, + 0x7b, 0x50, 0x4e, 0xa8, 0x4d, 0x99, 0xdb, 0x1a, 0xe4, 0x8f, 0x4c, 0x77, 0x42, 0xe4, 0xec, 0x44, + 0xe3, 0xfd, 0xec, 0xa3, 0x8c, 0xfa, 0x11, 0xac, 0x75, 0xcc, 0x11, 0xb1, 0x1f, 0x13, 0x8f, 0xf8, + 0x8e, 0x85, 0x49, 0x40, 0x27, 0xbe, 0x45, 0xd8, 0x5c, 0x0f, 0x1d, 0xcf, 0x8e, 0xe6, 0xca, 0x7e, + 0xa7, 0x6b, 0x51, 0x35, 0x78, 0xad, 0xe5, 0x04, 0x96, 0x4f, 0x42, 0xf2, 0xb9, 0x95, 0x2c, 0x46, + 0x4a, 0x4e, 0x33, 0xb0, 0x3a, 0x2f, 0xfd, 0x33, 0x70, 0x9d, 0x99, 0xd8, 0x36, 0x7c, 0x49, 0x31, + 0x82, 0x31, 0xb1, 0xb8, 0xb2, 0xf2, 0x83, 0x77, 0xd2, 0x2c, 0x94, 0x36, 0x93, 0xed, 0x05, 0x7c, + 0x8d, 0xab, 0x89, 0x08, 0xfd, 0x31, 0xb1, 0x90, 0x05, 0x37, 0x6c, 0x39, 0xe8, 0x39, 0xf5, 0x59, + 0xae, 0x3e, 0x75, 0x19, 0x2f, 0x98, 0xe6, 0xf6, 0x02, 0x5e, 0x8b, 0x94, 0x25, 0x3b, 0x69, 0x02, + 0x14, 0x23, 0xdd, 0xea, 0xf7, 0x32, 0x50, 0x8a, 0x98, 0x01, 0xfa, 0x32, 0x94, 0x3c, 0xd3, 0xa3, + 0x86, 0x35, 0x9e, 0x04, 0x7c, 0x42, 0x8b, 0xcd, 0xca, 0xd9, 0xe9, 0x66, 0xb1, 0x63, 0x7a, 0x54, + 0xeb, 0xed, 0x06, 0xb8, 0xc8, 0xd8, 0xda, 0x78, 0x12, 0xa0, 0x37, 0xa0, 0x32, 0x22, 0x23, 0xea, + 0x4f, 0x8d, 0xbd, 0x69, 0x48, 0x02, 0x69, 0xb6, 0xb2, 0xa0, 0x35, 0x19, 0x09, 0x7d, 0x03, 0x96, + 0x86, 0x62, 0x48, 0xeb, 0x8b, 0x7c, 0xfb, 0xbc, 0x99, 0x36, 0xfa, 0xb9, 0x51, 0xe3, 0x48, 0x46, + 0xfd, 0xcd, 0x0c, 0xac, 0xc5, 
0x54, 0xf2, 0x0b, 0x13, 0xc7, 0x27, 0x23, 0xe2, 0x85, 0x01, 0xfa, + 0x1a, 0x14, 0x5c, 0x67, 0xe4, 0x84, 0x81, 0xb4, 0xf9, 0xeb, 0x69, 0x6a, 0xe3, 0x49, 0x61, 0x09, + 0x46, 0x0d, 0xa8, 0xf8, 0x24, 0x20, 0xfe, 0x91, 0xd8, 0xf1, 0xd2, 0xa2, 0x57, 0x08, 0x9f, 0x13, + 0x51, 0xb7, 0xa0, 0xd8, 0x73, 0xcd, 0x70, 0x9f, 0xfa, 0x23, 0xa4, 0x42, 0xc5, 0xf4, 0xad, 0x03, + 0x27, 0x24, 0x56, 0x38, 0xf1, 0xa3, 0xd3, 0x77, 0x8e, 0x86, 0x6e, 0x40, 0x96, 0x8a, 0x8e, 0x4a, + 0xcd, 0xc2, 0xd9, 0xe9, 0x66, 0xb6, 0xdb, 0xc7, 0x59, 0x1a, 0xa8, 0x1f, 0xc0, 0xb5, 0x9e, 0x3b, + 0x19, 0x3a, 0x5e, 0x8b, 0x04, 0x96, 0xef, 0x8c, 0x99, 0x76, 0xb6, 0x2b, 0x99, 0x8f, 0x8a, 0x76, + 0x25, 0xfb, 0x1d, 0x1f, 0xed, 0xec, 0xec, 0x68, 0xab, 0xbf, 0x9e, 0x85, 0x6b, 0xba, 0x37, 0x74, + 0x3c, 0x92, 0x94, 0xbe, 0x0d, 0x2b, 0x84, 0x13, 0x8d, 0x23, 0xe1, 0x6e, 0xa4, 0x9e, 0x65, 0x41, + 0x8d, 0x7c, 0x50, 0x7b, 0xce, 0x2f, 0xdc, 0x4f, 0x9b, 0xfe, 0x2b, 0xda, 0x53, 0xbd, 0x83, 0x0e, + 0x4b, 0x63, 0x3e, 0x89, 0x40, 0x2e, 0xef, 0xed, 0x34, 0x5d, 0xaf, 0xcc, 0x33, 0x72, 0x12, 0x52, + 0xf6, 0x27, 0x71, 0x12, 0x7f, 0x9b, 0x85, 0xd5, 0x0e, 0xb5, 0xcf, 0xd9, 0xa1, 0x0a, 0xc5, 0x03, + 0x1a, 0x84, 0x09, 0x87, 0x18, 0xb7, 0xd1, 0x23, 0x28, 0x8e, 0xe5, 0xf2, 0xc9, 0xd5, 0xbf, 0x95, + 0x3e, 0x64, 0x81, 0xc1, 0x31, 0x1a, 0x7d, 0x00, 0xa5, 0xe8, 0xc8, 0xb0, 0xd9, 0x7e, 0x86, 0x8d, + 0x33, 0xc3, 0xa3, 0x6f, 0x40, 0x41, 0x2c, 0xc2, 0x7a, 0x8e, 0x4b, 0xde, 0xfe, 0x4c, 0x36, 0xc7, + 0x52, 0x08, 0x3d, 0x86, 0x62, 0xe8, 0x06, 0x86, 0xe3, 0xed, 0xd3, 0xf5, 0x3c, 0x57, 0xb0, 0x99, + 0xea, 0x64, 0xa8, 0x4d, 0x06, 0x3b, 0xfd, 0xb6, 0xb7, 0x4f, 0x9b, 0xe5, 0xb3, 0xd3, 0xcd, 0x25, + 0xd9, 0xc0, 0x4b, 0xa1, 0x1b, 0xb0, 0x1f, 0xe8, 0x16, 0xe4, 0xf6, 0x9d, 0x71, 0xb0, 0x5e, 0xa8, + 0x65, 0xde, 0x29, 0x36, 0x8b, 0x67, 0xa7, 0x9b, 0xb9, 0xad, 0x76, 0xaf, 0x8f, 0x39, 0x55, 0xfd, + 0xad, 0x0c, 0x94, 0x13, 0x3a, 0xd0, 0xeb, 0x00, 0xa1, 0x3f, 0x09, 0x42, 0xc3, 0xa7, 0x34, 0xe4, + 0xa6, 0xac, 0xe0, 0x12, 0xa7, 0x60, 0x4a, 0x43, 0x54, 0x87, 0xeb, 0x16, 0xf1, 0x43, 0xc3, 0x09, + 0x82, 0x09, 0xf1, 0x8d, 0x60, 0xb2, 0xf7, 0x31, 0xb1, 0x42, 0x6e, 0xd6, 0x0a, 0xbe, 0xc6, 0x58, + 0x6d, 0xce, 0xe9, 0x0b, 0x06, 0x7a, 0x08, 0x37, 0x92, 0xf8, 0xf1, 0x64, 0xcf, 0x75, 0x2c, 0x83, + 0x2d, 0xf5, 0x22, 0x17, 0xb9, 0x3e, 0x13, 0xe9, 0x71, 0xde, 0x13, 0x32, 0x55, 0x7f, 0x98, 0x01, + 0x05, 0x9b, 0xfb, 0xe1, 0x53, 0x32, 0xda, 0x23, 0x7e, 0x3f, 0x34, 0xc3, 0x49, 0x80, 0x6e, 0x40, + 0xc1, 0x25, 0xa6, 0x4d, 0x7c, 0x3e, 0xa8, 0x22, 0x96, 0x2d, 0xb4, 0xcb, 0xce, 0xb7, 0x69, 0x1d, + 0x98, 0x7b, 0x8e, 0xeb, 0x84, 0x53, 0x3e, 0x94, 0x95, 0xf4, 0x0d, 0x3e, 0xaf, 0xb3, 0x8e, 0x13, + 0x82, 0xf8, 0x9c, 0x1a, 0xb4, 0x0e, 0x4b, 0x23, 0x12, 0x04, 0xe6, 0x90, 0xf0, 0x91, 0x96, 0x70, + 0xd4, 0x54, 0x3f, 0x80, 0x4a, 0x52, 0x0e, 0x95, 0x61, 0x69, 0xb7, 0xf3, 0xa4, 0xd3, 0x7d, 0xde, + 0x51, 0x16, 0xd0, 0x2a, 0x94, 0x77, 0x3b, 0x58, 0x6f, 0x68, 0xdb, 0x8d, 0xe6, 0x8e, 0xae, 0x64, + 0xd0, 0x32, 0x94, 0x66, 0xcd, 0xac, 0xfa, 0xa7, 0x19, 0x00, 0x66, 0x6e, 0x39, 0xa9, 0xf7, 0x21, + 0x1f, 0x84, 0x66, 0x28, 0xf6, 0xec, 0xca, 0x83, 0xb7, 0x2e, 0x5a, 0x61, 0x39, 0x5e, 0xf6, 0x8f, + 0x60, 0x21, 0x92, 0x1c, 0x61, 0xf6, 0xdc, 0x08, 0x99, 0xfb, 0x30, 0x6d, 0xdb, 0x97, 0x03, 0xe7, + 0xbf, 0xd5, 0x0f, 0x20, 0xcf, 0xa5, 0xcf, 0x0f, 0xb7, 0x08, 0xb9, 0x16, 0xfb, 0x95, 0x41, 0x25, + 0xc8, 0x63, 0xbd, 0xd1, 0xfa, 0x8e, 0x92, 0x45, 0x0a, 0x54, 0x5a, 0xed, 0xbe, 0xd6, 0xed, 0x74, + 0x74, 0x6d, 0xa0, 0xb7, 0x94, 0x45, 0xf5, 0x36, 0xe4, 0xdb, 0x23, 0xa6, 0xf9, 0x16, 0x3b, 0x10, + 0xfb, 0xc4, 0x27, 0x9e, 0x15, 0x9d, 0xb3, 0x19, 0x41, 
0xfd, 0x71, 0x19, 0xf2, 0x4f, 0xe9, 0xc4, + 0x0b, 0xd1, 0x83, 0x84, 0x53, 0x5b, 0x49, 0x8f, 0x1f, 0x38, 0xb0, 0x3e, 0x98, 0x8e, 0x89, 0x74, + 0x7a, 0x37, 0xa0, 0x20, 0x8e, 0x8e, 0x9c, 0x8e, 0x6c, 0x31, 0x7a, 0x68, 0xfa, 0x43, 0x12, 0xca, + 0xf9, 0xc8, 0x16, 0x7a, 0x87, 0xdd, 0x67, 0xa6, 0x4d, 0x3d, 0x77, 0xca, 0x4f, 0x58, 0x51, 0x5c, + 0x5a, 0x98, 0x98, 0x76, 0xd7, 0x73, 0xa7, 0x38, 0xe6, 0xa2, 0x6d, 0xa8, 0xec, 0x39, 0x9e, 0x6d, + 0xd0, 0xb1, 0xb8, 0x02, 0xf2, 0x17, 0x9f, 0x47, 0x31, 0xaa, 0xa6, 0xe3, 0xd9, 0x5d, 0x01, 0xc6, + 0xe5, 0xbd, 0x59, 0x03, 0x75, 0x60, 0xe5, 0x88, 0xba, 0x93, 0x11, 0x89, 0x75, 0x15, 0xb8, 0xae, + 0xb7, 0x2f, 0xd6, 0xf5, 0x8c, 0xe3, 0x23, 0x6d, 0xcb, 0x47, 0xc9, 0x26, 0x7a, 0x02, 0xcb, 0xe1, + 0x68, 0xbc, 0x1f, 0xc4, 0xea, 0x96, 0xb8, 0xba, 0x2f, 0x5d, 0x62, 0x30, 0x06, 0x8f, 0xb4, 0x55, + 0xc2, 0x44, 0x0b, 0x3d, 0x86, 0xb2, 0x45, 0xbd, 0xc0, 0x09, 0x42, 0xe2, 0x59, 0xd3, 0xf5, 0x22, + 0xb7, 0xfd, 0x25, 0xb3, 0xd4, 0x66, 0x60, 0x9c, 0x94, 0xac, 0xfe, 0xea, 0x22, 0x94, 0x13, 0x26, + 0x40, 0x7d, 0x28, 0x8f, 0x7d, 0x3a, 0x36, 0x87, 0xfc, 0x3e, 0x94, 0x8b, 0x7a, 0xff, 0x33, 0x99, + 0xaf, 0xde, 0x9b, 0x09, 0xe2, 0xa4, 0x16, 0xf5, 0x24, 0x0b, 0xe5, 0x04, 0x13, 0xdd, 0x81, 0x22, + 0xee, 0xe1, 0xf6, 0xb3, 0xc6, 0x40, 0x57, 0x16, 0xaa, 0xb7, 0x8e, 0x4f, 0x6a, 0xeb, 0x5c, 0x5b, + 0x52, 0x41, 0xcf, 0x77, 0x8e, 0xd8, 0x1e, 0x7e, 0x07, 0x96, 0x22, 0x68, 0xa6, 0xfa, 0xc5, 0xe3, + 0x93, 0xda, 0x6b, 0xf3, 0xd0, 0x04, 0x12, 0xf7, 0xb7, 0x1b, 0x58, 0x6f, 0x29, 0xd9, 0x74, 0x24, + 0xee, 0x1f, 0x98, 0x3e, 0xb1, 0xd1, 0x97, 0xa0, 0x20, 0x81, 0x8b, 0xd5, 0xea, 0xf1, 0x49, 0xed, + 0xc6, 0x3c, 0x70, 0x86, 0xc3, 0xfd, 0x9d, 0xc6, 0x33, 0x5d, 0xc9, 0xa5, 0xe3, 0x70, 0xdf, 0x35, + 0x8f, 0x08, 0x7a, 0x0b, 0xf2, 0x02, 0x96, 0xaf, 0xde, 0x3c, 0x3e, 0xa9, 0x7d, 0xe1, 0x15, 0x75, + 0x0c, 0x55, 0x5d, 0xff, 0x8d, 0x3f, 0xdc, 0x58, 0xf8, 0xcb, 0x3f, 0xda, 0x50, 0xe6, 0xd9, 0xd5, + 0xff, 0xc9, 0xc0, 0xf2, 0xb9, 0xbd, 0x83, 0x54, 0x28, 0x78, 0xd4, 0xa2, 0x63, 0x71, 0x4d, 0x16, + 0x9b, 0x70, 0x76, 0xba, 0x59, 0xe8, 0x50, 0x8d, 0x8e, 0xa7, 0x58, 0x72, 0xd0, 0x93, 0xb9, 0x8b, + 0xfe, 0xe1, 0x67, 0xdc, 0x98, 0xa9, 0x57, 0xfd, 0x87, 0xb0, 0x6c, 0xfb, 0xce, 0x11, 0xf1, 0x0d, + 0x8b, 0x7a, 0xfb, 0xce, 0x50, 0x5e, 0x81, 0xd5, 0xd4, 0x68, 0x94, 0x03, 0x71, 0x45, 0x08, 0x68, + 0x1c, 0xff, 0x13, 0x5c, 0xf2, 0xd5, 0x67, 0x50, 0x49, 0x6e, 0x75, 0x76, 0x2f, 0x05, 0xce, 0x2f, + 0x12, 0x19, 0x76, 0xf2, 0x20, 0x15, 0x97, 0x18, 0x45, 0x04, 0x9d, 0x6f, 0x43, 0x6e, 0x44, 0x6d, + 0xa1, 0x67, 0xb9, 0x79, 0x9d, 0xc5, 0x1a, 0xff, 0x7c, 0xba, 0x59, 0xa6, 0x41, 0x7d, 0xcb, 0x71, + 0xc9, 0x53, 0x6a, 0x13, 0xcc, 0x01, 0xea, 0x11, 0xe4, 0x98, 0xcf, 0x41, 0x5f, 0x84, 0x5c, 0xb3, + 0xdd, 0x69, 0x29, 0x0b, 0xd5, 0x6b, 0xc7, 0x27, 0xb5, 0x65, 0x6e, 0x12, 0xc6, 0x60, 0x7b, 0x17, + 0x6d, 0x42, 0xe1, 0x59, 0x77, 0x67, 0xf7, 0x29, 0xdb, 0x5e, 0xd7, 0x8f, 0x4f, 0x6a, 0xab, 0x31, + 0x5b, 0x18, 0x0d, 0xbd, 0x0e, 0xf9, 0xc1, 0xd3, 0xde, 0x56, 0x5f, 0xc9, 0x56, 0xd1, 0xf1, 0x49, + 0x6d, 0x25, 0xe6, 0xf3, 0x31, 0x57, 0xaf, 0xc9, 0x55, 0x2d, 0xc5, 0x74, 0xf5, 0x07, 0x19, 0x28, + 0x27, 0x0e, 0x1c, 0xdb, 0x98, 0x2d, 0x7d, 0xab, 0xb1, 0xbb, 0x33, 0x50, 0x16, 0x12, 0x1b, 0x33, + 0x01, 0x69, 0x91, 0x7d, 0x73, 0xe2, 0x32, 0x3f, 0x07, 0x5a, 0xb7, 0xd3, 0x6f, 0xf7, 0x07, 0x7a, + 0x67, 0xa0, 0x64, 0xaa, 0xeb, 0xc7, 0x27, 0xb5, 0xb5, 0x79, 0xf0, 0xd6, 0xc4, 0x75, 0xd9, 0xd6, + 0xd4, 0x1a, 0xda, 0x36, 0xdf, 0xeb, 0xb3, 0xad, 0x99, 0x40, 0x69, 0xa6, 0x75, 0x40, 0x6c, 0xf4, + 0x2e, 0x94, 0x5a, 0xfa, 0x8e, 0xfe, 0xb8, 0xc1, 0xbd, 0x7b, 0xf5, 0xf5, 0xe3, 
0x93, 0xda, 0xcd, + 0x57, 0x7b, 0x77, 0xc9, 0xd0, 0x0c, 0x89, 0x3d, 0xb7, 0x45, 0x13, 0x10, 0xf5, 0xbf, 0xb2, 0xb0, + 0x8c, 0x59, 0xb2, 0xec, 0x87, 0x3d, 0xea, 0x3a, 0xd6, 0x14, 0xf5, 0xa0, 0x64, 0x51, 0xcf, 0x76, + 0x12, 0x7e, 0xe2, 0xc1, 0x05, 0x01, 0xd3, 0x4c, 0x2a, 0x6a, 0x69, 0x91, 0x24, 0x9e, 0x29, 0x41, + 0x77, 0x21, 0x6f, 0x13, 0xd7, 0x9c, 0xca, 0xc8, 0xed, 0x66, 0x5d, 0xa4, 0xe3, 0xf5, 0x28, 0x1d, + 0xaf, 0xb7, 0x64, 0x3a, 0x8e, 0x05, 0x8e, 0x67, 0x28, 0xe6, 0x4b, 0xc3, 0x0c, 0x43, 0x32, 0x1a, + 0x87, 0x22, 0x6c, 0xcb, 0xe1, 0xf2, 0xc8, 0x7c, 0xd9, 0x90, 0x24, 0x74, 0x1f, 0x0a, 0x2f, 0x1c, + 0xcf, 0xa6, 0x2f, 0x64, 0x64, 0x76, 0x89, 0x52, 0x09, 0x54, 0x8f, 0x59, 0x48, 0x32, 0x37, 0x4c, + 0xb6, 0x87, 0x3a, 0xdd, 0x8e, 0x1e, 0xed, 0x21, 0xc9, 0xef, 0x7a, 0x1d, 0xea, 0xb1, 0xf3, 0x0f, + 0xdd, 0x8e, 0xb1, 0xd5, 0x68, 0xef, 0xec, 0x62, 0xb6, 0x8f, 0xd6, 0x8e, 0x4f, 0x6a, 0x4a, 0x0c, + 0xd9, 0x32, 0x1d, 0x97, 0xa5, 0x0a, 0x37, 0x61, 0xb1, 0xd1, 0xf9, 0x8e, 0x92, 0xad, 0x2a, 0xc7, + 0x27, 0xb5, 0x4a, 0xcc, 0x6e, 0x78, 0xd3, 0x99, 0xdd, 0xe7, 0xfb, 0x55, 0xff, 0x6e, 0x11, 0x2a, + 0xbb, 0x63, 0xdb, 0x0c, 0x89, 0x38, 0x67, 0xa8, 0x06, 0xe5, 0xb1, 0xe9, 0x9b, 0xae, 0x4b, 0x5c, + 0x27, 0x18, 0xc9, 0x42, 0x43, 0x92, 0x84, 0xde, 0xfb, 0xac, 0x66, 0x6c, 0x16, 0xd9, 0xd9, 0xf9, + 0xde, 0xbf, 0x6e, 0x66, 0x22, 0x83, 0xee, 0xc2, 0xca, 0xbe, 0x18, 0xad, 0x61, 0x5a, 0x7c, 0x61, + 0x17, 0xf9, 0xc2, 0xd6, 0xd3, 0x16, 0x36, 0x39, 0xac, 0xba, 0x9c, 0x64, 0x83, 0x4b, 0xe1, 0xe5, + 0xfd, 0x64, 0x13, 0x3d, 0x84, 0xa5, 0x11, 0xf5, 0x9c, 0x90, 0xfa, 0x57, 0xaf, 0x42, 0x84, 0x44, + 0x77, 0xe0, 0x1a, 0x5b, 0xdc, 0x68, 0x3c, 0x9c, 0xcd, 0xaf, 0xf3, 0x2c, 0x5e, 0x1d, 0x99, 0x2f, + 0x65, 0x87, 0x98, 0x91, 0x51, 0x13, 0xf2, 0xd4, 0x67, 0xf1, 0x62, 0x81, 0x0f, 0xf7, 0xdd, 0x2b, + 0x87, 0x2b, 0x1a, 0x5d, 0x26, 0x83, 0x85, 0xa8, 0xfa, 0x75, 0x58, 0x3e, 0x37, 0x09, 0x16, 0x26, + 0xf5, 0x1a, 0xbb, 0x7d, 0x5d, 0x59, 0x40, 0x15, 0x28, 0x6a, 0xdd, 0xce, 0xa0, 0xdd, 0xd9, 0x65, + 0x71, 0x5e, 0x05, 0x8a, 0xb8, 0xbb, 0xb3, 0xd3, 0x6c, 0x68, 0x4f, 0x94, 0xac, 0x5a, 0x87, 0x72, + 0x42, 0x1b, 0x5a, 0x01, 0xe8, 0x0f, 0xba, 0x3d, 0x63, 0xab, 0x8d, 0xfb, 0x03, 0x11, 0x25, 0xf6, + 0x07, 0x0d, 0x3c, 0x90, 0x84, 0x8c, 0xfa, 0x1f, 0xd9, 0x68, 0x45, 0x65, 0x60, 0xd8, 0x3c, 0x1f, + 0x18, 0x5e, 0x32, 0x78, 0x19, 0x1a, 0xce, 0x1a, 0x71, 0x80, 0xf8, 0x1e, 0x00, 0xdf, 0x38, 0xc4, + 0x36, 0xcc, 0x50, 0x2e, 0x7c, 0xf5, 0x15, 0x23, 0x0f, 0xa2, 0x7a, 0x17, 0x2e, 0x49, 0x74, 0x23, + 0x44, 0xdf, 0x80, 0x8a, 0x45, 0x47, 0x63, 0x97, 0x48, 0xe1, 0xc5, 0x2b, 0x85, 0xcb, 0x31, 0xbe, + 0x11, 0x26, 0x43, 0xd3, 0xdc, 0xf9, 0xe0, 0xf9, 0xd7, 0x32, 0x91, 0x65, 0x52, 0xa2, 0xd1, 0x0a, + 0x14, 0x77, 0x7b, 0xad, 0xc6, 0xa0, 0xdd, 0x79, 0xac, 0x64, 0x10, 0x40, 0x81, 0x9b, 0xba, 0xa5, + 0x64, 0x59, 0x14, 0xad, 0x75, 0x9f, 0xf6, 0x76, 0x74, 0xee, 0xb1, 0xd0, 0x1a, 0x28, 0x91, 0xb1, + 0x0d, 0x6e, 0x48, 0xbd, 0xa5, 0xe4, 0xd0, 0x75, 0x58, 0x8d, 0xa9, 0x52, 0x32, 0x8f, 0x6e, 0x00, + 0x8a, 0x89, 0x33, 0x15, 0x05, 0xf5, 0x97, 0x61, 0x55, 0xa3, 0x5e, 0x68, 0x3a, 0x5e, 0x9c, 0x61, + 0x3c, 0x60, 0x93, 0x96, 0x24, 0xc3, 0x91, 0x75, 0xa2, 0xe6, 0xea, 0xd9, 0xe9, 0x66, 0x39, 0x86, + 0xb6, 0x5b, 0x3c, 0x54, 0x92, 0x0d, 0x9b, 0x9d, 0xdf, 0xb1, 0x63, 0x73, 0xe3, 0xe6, 0x9b, 0x4b, + 0x67, 0xa7, 0x9b, 0x8b, 0xbd, 0x76, 0x0b, 0x33, 0x1a, 0xfa, 0x22, 0x94, 0xc8, 0x4b, 0x27, 0x34, + 0x2c, 0x76, 0x2f, 0x31, 0x03, 0xe6, 0x71, 0x91, 0x11, 0x34, 0x76, 0x0d, 0x35, 0x01, 0x7a, 0xd4, + 0x0f, 0x65, 0xcf, 0x5f, 0x85, 0xfc, 0x98, 0xfa, 0xbc, 0xb2, 0x71, 0x61, 0xbd, 0x8d, 0xc1, 0xc5, + 0x46, 
0xc5, 0x02, 0xac, 0xfe, 0xee, 0x22, 0xc0, 0xc0, 0x0c, 0x0e, 0xa5, 0x92, 0x47, 0x50, 0x8a, + 0x6b, 0x97, 0xb2, 0x44, 0x72, 0xe9, 0x6a, 0xc7, 0x60, 0xf4, 0x30, 0xda, 0x6c, 0x22, 0x77, 0x4a, + 0x4d, 0x71, 0xa3, 0x8e, 0xd2, 0xd2, 0x8f, 0xf3, 0x09, 0x12, 0xbb, 0xe6, 0x89, 0xef, 0xcb, 0x95, + 0x67, 0x3f, 0x91, 0xc6, 0xaf, 0x05, 0x61, 0x34, 0x19, 0x7d, 0xa7, 0x16, 0x85, 0xe6, 0x56, 0x64, + 0x7b, 0x01, 0xcf, 0xe4, 0xd0, 0x87, 0x50, 0x66, 0xf3, 0x36, 0x02, 0xce, 0x93, 0x81, 0xf7, 0x85, + 0xa6, 0x12, 0x1a, 0x30, 0x8c, 0x67, 0x56, 0x7e, 0x1d, 0xc0, 0x1c, 0x8f, 0x5d, 0x87, 0xd8, 0xc6, + 0xde, 0x94, 0x47, 0xda, 0x25, 0x5c, 0x92, 0x94, 0xe6, 0x94, 0x1d, 0x97, 0x88, 0x6d, 0x86, 0x3c, + 0x7a, 0xbe, 0xc2, 0x80, 0x12, 0xdd, 0x08, 0x9b, 0x0a, 0xac, 0xf8, 0x13, 0x8f, 0x19, 0x54, 0x8e, + 0x4e, 0xfd, 0x93, 0x2c, 0xbc, 0xd6, 0x21, 0xe1, 0x0b, 0xea, 0x1f, 0x36, 0xc2, 0xd0, 0xb4, 0x0e, + 0x46, 0xc4, 0x93, 0xcb, 0x97, 0x48, 0x68, 0x32, 0xe7, 0x12, 0x9a, 0x75, 0x58, 0x32, 0x5d, 0xc7, + 0x0c, 0x88, 0x08, 0xde, 0x4a, 0x38, 0x6a, 0xb2, 0xb4, 0x8b, 0x25, 0x71, 0x24, 0x08, 0x88, 0xa8, + 0xba, 0xb0, 0x81, 0x47, 0x04, 0xf4, 0x5d, 0xb8, 0x21, 0xc3, 0x34, 0x33, 0xee, 0x8a, 0x25, 0x14, + 0x51, 0xf9, 0x56, 0x4f, 0xcd, 0x2a, 0xd3, 0x07, 0x27, 0xe3, 0xb8, 0x19, 0xb9, 0x3b, 0x0e, 0x65, + 0x54, 0xb8, 0x66, 0xa7, 0xb0, 0xaa, 0x8f, 0xe1, 0xe6, 0x85, 0x22, 0x9f, 0xab, 0xaa, 0xf3, 0x8f, + 0x59, 0x80, 0x76, 0xaf, 0xf1, 0x54, 0x1a, 0xa9, 0x05, 0x85, 0x7d, 0x73, 0xe4, 0xb8, 0xd3, 0xcb, + 0x3c, 0xe0, 0x0c, 0x5f, 0x6f, 0x08, 0x73, 0x6c, 0x71, 0x19, 0x2c, 0x65, 0x79, 0x4e, 0x39, 0xd9, + 0xf3, 0x48, 0x18, 0xe7, 0x94, 0xbc, 0xc5, 0x86, 0xe1, 0x9b, 0x5e, 0xbc, 0x75, 0x45, 0x83, 0x2d, + 0x00, 0x0b, 0x79, 0x5e, 0x98, 0xd3, 0xc8, 0x6d, 0xc9, 0x26, 0xda, 0xe6, 0xb5, 0x53, 0xe2, 0x1f, + 0x11, 0x7b, 0x3d, 0xcf, 0x8d, 0x7a, 0xd5, 0x78, 0xb0, 0x84, 0x0b, 0xdb, 0xc5, 0xd2, 0xd5, 0x0f, + 0x78, 0xc8, 0x34, 0x63, 0x7d, 0x2e, 0x1b, 0xdd, 0x83, 0xe5, 0x73, 0xf3, 0x7c, 0x25, 0x99, 0x6f, + 0xf7, 0x9e, 0x7d, 0x55, 0xc9, 0xc9, 0x5f, 0x5f, 0x57, 0x0a, 0xea, 0xdf, 0x2c, 0x0a, 0x47, 0x23, + 0xad, 0x9a, 0xfe, 0x66, 0x50, 0xe4, 0xbb, 0xdb, 0xa2, 0xae, 0x74, 0x00, 0x6f, 0x5f, 0xee, 0x7f, + 0x58, 0x4e, 0xc7, 0xe1, 0x38, 0x16, 0x44, 0x9b, 0x50, 0x16, 0xbb, 0xd8, 0x60, 0x07, 0x8e, 0x9b, + 0x75, 0x19, 0x83, 0x20, 0x31, 0x49, 0x74, 0x1b, 0x56, 0x78, 0xf1, 0x27, 0x38, 0x20, 0xb6, 0xc0, + 0xe4, 0x38, 0x66, 0x39, 0xa6, 0x72, 0xd8, 0x53, 0xa8, 0x48, 0x82, 0xc1, 0xe3, 0xf9, 0x3c, 0x1f, + 0xd0, 0x9d, 0xab, 0x06, 0x24, 0x44, 0x78, 0x98, 0x5f, 0x1e, 0xcf, 0x1a, 0xea, 0xcf, 0x43, 0x31, + 0x1a, 0x2c, 0x5a, 0x87, 0xc5, 0x81, 0xd6, 0x53, 0x16, 0xaa, 0xab, 0xc7, 0x27, 0xb5, 0x72, 0x44, + 0x1e, 0x68, 0x3d, 0xc6, 0xd9, 0x6d, 0xf5, 0x94, 0xcc, 0x79, 0xce, 0x6e, 0xab, 0x87, 0xaa, 0x90, + 0xeb, 0x6b, 0x83, 0x5e, 0x14, 0x9f, 0x45, 0x2c, 0x46, 0xab, 0xe6, 0x58, 0x7c, 0xa6, 0xee, 0x43, + 0x39, 0xd1, 0x3b, 0x7a, 0x13, 0x96, 0xda, 0x9d, 0xc7, 0x58, 0xef, 0xf7, 0x95, 0x85, 0xea, 0x8d, + 0xe3, 0x93, 0x1a, 0x4a, 0x70, 0xdb, 0xde, 0x90, 0xad, 0x1d, 0x7a, 0x1d, 0x72, 0xdb, 0x5d, 0x76, + 0xef, 0x8b, 0xe4, 0x22, 0x81, 0xd8, 0xa6, 0x41, 0x58, 0xbd, 0x2e, 0x03, 0xbf, 0xa4, 0x62, 0xf5, + 0xf7, 0x32, 0x50, 0x10, 0x07, 0x2d, 0x75, 0x11, 0x1b, 0xb0, 0x14, 0x95, 0x10, 0x44, 0xe2, 0xf7, + 0xf6, 0xc5, 0x49, 0x5a, 0x5d, 0xe6, 0x54, 0x62, 0x6b, 0x46, 0x72, 0xd5, 0xf7, 0xa1, 0x92, 0x64, + 0x7c, 0xae, 0x8d, 0xf9, 0x5d, 0x28, 0xb3, 0xbd, 0x1f, 0x25, 0x6b, 0x0f, 0xa0, 0x20, 0x9c, 0x45, + 0x7c, 0x0f, 0x5d, 0x9c, 0x31, 0x4a, 0x24, 0x7a, 0x04, 0x4b, 0x22, 0xcb, 0x8c, 0xea, 0xca, 0x1b, + 0x97, 0x9f, 0x30, 0x1c, 0xc1, 
0xd5, 0x0f, 0x21, 0xd7, 0x23, 0xc4, 0x67, 0xb6, 0xf7, 0xa8, 0x4d, + 0x66, 0x57, 0xb7, 0x4c, 0x90, 0x6d, 0xd2, 0x6e, 0xb1, 0x04, 0xd9, 0x26, 0x6d, 0x3b, 0xae, 0x8d, + 0x65, 0x13, 0xb5, 0xb1, 0x01, 0x54, 0x9e, 0x13, 0x67, 0x78, 0x10, 0x12, 0x9b, 0x2b, 0x7a, 0x17, + 0x72, 0x63, 0x12, 0x0f, 0x7e, 0x3d, 0x75, 0xf3, 0x11, 0xe2, 0x63, 0x8e, 0x62, 0x3e, 0xe6, 0x05, + 0x97, 0x96, 0x8f, 0x21, 0xb2, 0xa5, 0xfe, 0x43, 0x16, 0x56, 0xda, 0x41, 0x30, 0x31, 0x3d, 0x2b, + 0x8a, 0xea, 0xbe, 0x79, 0x3e, 0xaa, 0x4b, 0x7d, 0x35, 0x3a, 0x2f, 0x72, 0xbe, 0xe4, 0x27, 0x6f, + 0xd6, 0x6c, 0x7c, 0xb3, 0xaa, 0x3f, 0xce, 0x44, 0x75, 0xbd, 0xdb, 0x09, 0x57, 0x20, 0x72, 0xc4, + 0xa4, 0x26, 0xb2, 0xeb, 0x1d, 0x7a, 0xf4, 0x85, 0x87, 0xde, 0x80, 0x3c, 0xd6, 0x3b, 0xfa, 0x73, + 0x25, 0x23, 0xb6, 0xe7, 0x39, 0x10, 0x26, 0x1e, 0x79, 0xc1, 0x34, 0xf5, 0xf4, 0x4e, 0x8b, 0x45, + 0x61, 0xd9, 0x14, 0x4d, 0x3d, 0xe2, 0xd9, 0x8e, 0x37, 0x44, 0x6f, 0x42, 0xa1, 0xdd, 0xef, 0xef, + 0xf2, 0x14, 0xf2, 0xb5, 0xe3, 0x93, 0xda, 0xf5, 0x73, 0x28, 0x5e, 0xd3, 0xb5, 0x19, 0x88, 0xa5, + 0x40, 0x2c, 0x3e, 0x4b, 0x01, 0xb1, 0xd8, 0x5a, 0x80, 0x70, 0x77, 0xd0, 0x18, 0xe8, 0x4a, 0x3e, + 0x05, 0x84, 0x29, 0xfb, 0x2b, 0x8f, 0xdb, 0xbf, 0x64, 0x41, 0x69, 0x58, 0x16, 0x19, 0x87, 0x8c, + 0x2f, 0xb3, 0xce, 0x01, 0x14, 0xc7, 0xec, 0x97, 0x43, 0xa2, 0x08, 0xea, 0x51, 0xea, 0xbb, 0xe7, + 0x9c, 0x5c, 0x1d, 0x53, 0x97, 0x34, 0xec, 0x91, 0x13, 0x04, 0x0e, 0xf5, 0x04, 0x0d, 0xc7, 0x9a, + 0xaa, 0xff, 0x99, 0x81, 0xeb, 0x29, 0x08, 0x74, 0x0f, 0x72, 0x3e, 0x75, 0xa3, 0x35, 0xbc, 0x75, + 0x51, 0xc9, 0x96, 0x89, 0x62, 0x8e, 0x44, 0x1b, 0x00, 0xe6, 0x24, 0xa4, 0x26, 0xef, 0x9f, 0xaf, + 0x5e, 0x11, 0x27, 0x28, 0xe8, 0x39, 0x14, 0x02, 0x62, 0xf9, 0x24, 0x8a, 0xb3, 0x3f, 0xfc, 0xbf, + 0x8e, 0xbe, 0xde, 0xe7, 0x6a, 0xb0, 0x54, 0x57, 0xad, 0x43, 0x41, 0x50, 0xd8, 0xb6, 0xb7, 0xcd, + 0xd0, 0x94, 0x05, 0x7d, 0xfe, 0x9b, 0xed, 0x26, 0xd3, 0x1d, 0x46, 0xbb, 0xc9, 0x74, 0x87, 0xea, + 0x5f, 0x67, 0x01, 0xf4, 0x97, 0x21, 0xf1, 0x3d, 0xd3, 0xd5, 0x1a, 0x48, 0x4f, 0xdc, 0x0c, 0x62, + 0xb6, 0x5f, 0x4e, 0x7d, 0xc3, 0x88, 0x25, 0xea, 0x5a, 0x23, 0xe5, 0x6e, 0xb8, 0x09, 0x8b, 0x13, + 0x5f, 0x3e, 0x65, 0x8b, 0x18, 0x79, 0x17, 0xef, 0x60, 0x46, 0x43, 0xfa, 0xcc, 0x6d, 0x2d, 0x5e, + 0xfc, 0x60, 0x9d, 0xe8, 0x20, 0xd5, 0x75, 0xb1, 0x93, 0x6f, 0x99, 0x86, 0x45, 0xe4, 0xad, 0x52, + 0x11, 0x27, 0x5f, 0x6b, 0x68, 0xc4, 0x0f, 0x71, 0xc1, 0x32, 0xd9, 0xff, 0x9f, 0xc8, 0xbf, 0xbd, + 0x0b, 0x30, 0x9b, 0x1a, 0xda, 0x80, 0xbc, 0xb6, 0xd5, 0xef, 0xef, 0x28, 0x0b, 0xc2, 0x81, 0xcf, + 0x58, 0x9c, 0xac, 0xfe, 0x45, 0x16, 0x8a, 0x5a, 0x43, 0x5e, 0xb9, 0x1a, 0x28, 0xdc, 0x2b, 0xf1, + 0x67, 0x10, 0xf2, 0x72, 0xec, 0xf8, 0x53, 0xe9, 0x58, 0x2e, 0x49, 0x78, 0x57, 0x98, 0x08, 0x1b, + 0xb5, 0xce, 0x05, 0x10, 0x86, 0x0a, 0x91, 0x46, 0x30, 0x2c, 0x33, 0xf2, 0xf1, 0x1b, 0x97, 0x1b, + 0x4b, 0xa4, 0x2e, 0xb3, 0x76, 0x80, 0xcb, 0x91, 0x12, 0xcd, 0x0c, 0xd0, 0x7b, 0xb0, 0x1a, 0x38, + 0x43, 0xcf, 0xf1, 0x86, 0x46, 0x64, 0x3c, 0xfe, 0x26, 0xd3, 0xbc, 0x76, 0x76, 0xba, 0xb9, 0xdc, + 0x17, 0x2c, 0x69, 0xc3, 0x65, 0x89, 0xd4, 0xb8, 0x29, 0xd1, 0xd7, 0x61, 0x25, 0x21, 0xca, 0xac, + 0x28, 0xcc, 0xae, 0x9c, 0x9d, 0x6e, 0x56, 0x62, 0xc9, 0x27, 0x64, 0x8a, 0x2b, 0xb1, 0xe0, 0x13, + 0xc2, 0x6b, 0x33, 0xfb, 0xd4, 0xb7, 0x88, 0xe1, 0xf3, 0x33, 0xcd, 0x6f, 0xf7, 0x1c, 0x2e, 0x73, + 0x9a, 0x38, 0xe6, 0xea, 0x33, 0xb8, 0xde, 0xf5, 0xad, 0x03, 0x12, 0x84, 0xc2, 0x14, 0xd2, 0x8a, + 0x1f, 0xc2, 0xad, 0xd0, 0x0c, 0x0e, 0x8d, 0x03, 0x27, 0x08, 0xa9, 0x3f, 0x35, 0x7c, 0x12, 0x12, + 0x8f, 0xf1, 0x0d, 0xfe, 0xcc, 0x2b, 0x0b, 0x82, 0x37, 
0x19, 0x66, 0x5b, 0x40, 0x70, 0x84, 0xd8, + 0x61, 0x00, 0xb5, 0x0d, 0x15, 0x96, 0xc2, 0xc8, 0xa2, 0x1a, 0x9b, 0x3d, 0xb8, 0x74, 0x68, 0x7c, + 0xe6, 0x6b, 0xaa, 0xe4, 0xd2, 0xa1, 0xf8, 0xa9, 0x7e, 0x1b, 0x94, 0x96, 0x13, 0x8c, 0xcd, 0xd0, + 0x3a, 0x88, 0x2a, 0x9d, 0xa8, 0x05, 0xca, 0x01, 0x31, 0xfd, 0x70, 0x8f, 0x98, 0xa1, 0x31, 0x26, + 0xbe, 0x43, 0xed, 0xab, 0x57, 0x79, 0x35, 0x16, 0xe9, 0x71, 0x09, 0xf5, 0xbf, 0x33, 0x00, 0xd8, + 0xdc, 0x8f, 0xa2, 0xb5, 0xaf, 0xc0, 0xb5, 0xc0, 0x33, 0xc7, 0xc1, 0x01, 0x0d, 0x0d, 0xc7, 0x0b, + 0x89, 0x7f, 0x64, 0xba, 0xb2, 0xb8, 0xa3, 0x44, 0x8c, 0xb6, 0xa4, 0xa3, 0x77, 0x01, 0x1d, 0x12, + 0x32, 0x36, 0xa8, 0x6b, 0x1b, 0x11, 0x53, 0x3c, 0x42, 0xe7, 0xb0, 0xc2, 0x38, 0x5d, 0xd7, 0xee, + 0x47, 0x74, 0xd4, 0x84, 0x0d, 0x36, 0x7d, 0xe2, 0x85, 0xbe, 0x43, 0x02, 0x63, 0x9f, 0xfa, 0x46, + 0xe0, 0xd2, 0x17, 0xc6, 0x3e, 0x75, 0x5d, 0xfa, 0x82, 0xf8, 0x51, 0xdd, 0xac, 0xea, 0xd2, 0xa1, + 0x2e, 0x40, 0x5b, 0xd4, 0xef, 0xbb, 0xf4, 0xc5, 0x56, 0x84, 0x60, 0x21, 0xdd, 0x6c, 0xce, 0xa1, + 0x63, 0x1d, 0x46, 0x21, 0x5d, 0x4c, 0x1d, 0x38, 0xd6, 0x21, 0x7a, 0x13, 0x96, 0x89, 0x4b, 0x78, + 0xf9, 0x44, 0xa0, 0xf2, 0x1c, 0x55, 0x89, 0x88, 0x0c, 0xa4, 0x7e, 0x04, 0x8a, 0xee, 0x59, 0xfe, + 0x74, 0x9c, 0x58, 0xf3, 0x77, 0x01, 0x31, 0x27, 0x69, 0xb8, 0xd4, 0x3a, 0x34, 0x46, 0xa6, 0x67, + 0x0e, 0xd9, 0xb8, 0xc4, 0xeb, 0x9f, 0xc2, 0x38, 0x3b, 0xd4, 0x3a, 0x7c, 0x2a, 0xe9, 0xea, 0x7b, + 0x00, 0xfd, 0xb1, 0x4f, 0x4c, 0xbb, 0xcb, 0xa2, 0x09, 0x66, 0x3a, 0xde, 0x32, 0x6c, 0xf9, 0xb6, + 0x4a, 0x7d, 0x79, 0xd4, 0x15, 0xc1, 0x68, 0xc5, 0x74, 0xf5, 0x67, 0xe1, 0x7a, 0xcf, 0x35, 0x2d, + 0xfe, 0x9d, 0x41, 0x2f, 0x7e, 0xce, 0x42, 0x8f, 0xa0, 0x20, 0xa0, 0x72, 0x25, 0x53, 0x8f, 0xdb, + 0xac, 0xcf, 0xed, 0x05, 0x2c, 0xf1, 0xcd, 0x0a, 0xc0, 0x4c, 0x8f, 0xfa, 0x67, 0x19, 0x28, 0xc5, + 0xfa, 0x51, 0x4d, 0xbc, 0xd2, 0x84, 0xbe, 0xe9, 0x78, 0x32, 0xe3, 0x2f, 0xe1, 0x24, 0x09, 0xb5, + 0xa1, 0x3c, 0x8e, 0xa5, 0x2f, 0x8d, 0xe7, 0x52, 0x46, 0x8d, 0x93, 0xb2, 0xe8, 0x7d, 0x28, 0x45, + 0x8f, 0xd9, 0x91, 0x87, 0xbd, 0xfc, 0xed, 0x7b, 0x06, 0x57, 0xbf, 0x09, 0xf0, 0x2d, 0xea, 0x78, + 0x03, 0x7a, 0x48, 0x3c, 0xfe, 0xfc, 0xca, 0xf2, 0x45, 0x12, 0x59, 0x51, 0xb6, 0x78, 0x19, 0x40, + 0x2c, 0x41, 0xfc, 0x0a, 0x29, 0x9a, 0xea, 0x5f, 0x65, 0xa1, 0x80, 0x29, 0x0d, 0xb5, 0x06, 0xaa, + 0x41, 0x41, 0xfa, 0x09, 0x7e, 0xff, 0x34, 0x4b, 0x67, 0xa7, 0x9b, 0x79, 0xe1, 0x20, 0xf2, 0x16, + 0xf7, 0x0c, 0x09, 0x0f, 0x9e, 0xbd, 0xc8, 0x83, 0xa3, 0x7b, 0x50, 0x91, 0x20, 0xe3, 0xc0, 0x0c, + 0x0e, 0x44, 0xf2, 0xd6, 0x5c, 0x39, 0x3b, 0xdd, 0x04, 0x81, 0xdc, 0x36, 0x83, 0x03, 0x0c, 0x02, + 0xcd, 0x7e, 0x23, 0x1d, 0xca, 0x1f, 0x53, 0xc7, 0x33, 0x42, 0x3e, 0x09, 0x59, 0x68, 0x4c, 0x5d, + 0xc7, 0xd9, 0x54, 0xe5, 0x97, 0x0a, 0xf0, 0xf1, 0x6c, 0xf2, 0x3a, 0x2c, 0xfb, 0x94, 0x86, 0xc2, + 0x6d, 0x39, 0xd4, 0x93, 0x35, 0x8c, 0x5a, 0x6a, 0x69, 0x9b, 0xd2, 0x10, 0x4b, 0x1c, 0xae, 0xf8, + 0x89, 0x16, 0xba, 0x07, 0x6b, 0xae, 0x19, 0x84, 0x06, 0xf7, 0x77, 0xf6, 0x4c, 0x5b, 0x81, 0x1f, + 0x35, 0xc4, 0x78, 0x5b, 0x9c, 0x15, 0x49, 0xa8, 0xff, 0x94, 0x81, 0x32, 0x9b, 0x8c, 0xb3, 0xef, + 0x58, 0x2c, 0xc8, 0xfb, 0xfc, 0xb1, 0xc7, 0x4d, 0x58, 0xb4, 0x02, 0x5f, 0x1a, 0x95, 0x5f, 0xbe, + 0x5a, 0x1f, 0x63, 0x46, 0x43, 0x1f, 0x41, 0x41, 0xd6, 0x52, 0x44, 0xd8, 0xa1, 0x5e, 0x1d, 0x8e, + 0x4a, 0xdb, 0x48, 0x39, 0xbe, 0x97, 0x67, 0xa3, 0x13, 0x97, 0x00, 0x4e, 0x92, 0xd0, 0x0d, 0xc8, + 0x5a, 0xc2, 0x5c, 0xf2, 0x53, 0x18, 0xad, 0x83, 0xb3, 0x96, 0xa7, 0xfe, 0x20, 0x03, 0xcb, 0xb3, + 0x03, 0xcf, 0x76, 0xc0, 0x2d, 0x28, 0x05, 0x93, 0xbd, 0x60, 0x1a, 0x84, 0x64, 
0x14, 0x3d, 0x2d, + 0xc7, 0x04, 0xd4, 0x86, 0x92, 0xe9, 0x0e, 0xa9, 0xef, 0x84, 0x07, 0x23, 0x99, 0xa5, 0xa6, 0x87, + 0x0a, 0x49, 0x9d, 0xf5, 0x46, 0x24, 0x82, 0x67, 0xd2, 0xd1, 0xbd, 0x2f, 0xbe, 0x3f, 0xe0, 0xf7, + 0xfe, 0x1b, 0x50, 0x71, 0xcd, 0x11, 0x2f, 0x2e, 0x85, 0xce, 0x48, 0xcc, 0x23, 0x87, 0xcb, 0x92, + 0x36, 0x70, 0x46, 0x44, 0x55, 0xa1, 0x14, 0x2b, 0x43, 0xab, 0x50, 0x6e, 0xe8, 0x7d, 0xe3, 0xfe, + 0x83, 0x47, 0xc6, 0x63, 0xed, 0xa9, 0xb2, 0x20, 0x63, 0xd3, 0x3f, 0xcf, 0xc0, 0xb2, 0x74, 0x47, + 0x32, 0xde, 0x7f, 0x13, 0x96, 0x7c, 0x73, 0x3f, 0x8c, 0x32, 0x92, 0x9c, 0xd8, 0xd5, 0xcc, 0xc3, + 0xb3, 0x8c, 0x84, 0xb1, 0xd2, 0x33, 0x92, 0xc4, 0xc7, 0x0e, 0x8b, 0x97, 0x7e, 0xec, 0x90, 0xfb, + 0xa9, 0x7c, 0xec, 0xa0, 0xfe, 0x0a, 0xc0, 0x96, 0xe3, 0x92, 0x81, 0xa8, 0x43, 0xa5, 0xe5, 0x97, + 0x2c, 0x86, 0x93, 0x75, 0xce, 0x28, 0x86, 0x6b, 0xb7, 0x30, 0xa3, 0x31, 0xd6, 0xd0, 0xb1, 0xe5, + 0x61, 0xe4, 0xac, 0xc7, 0x8c, 0x35, 0x74, 0xec, 0xf8, 0x55, 0x2e, 0x77, 0xd5, 0xab, 0xdc, 0x49, + 0x06, 0x56, 0x65, 0xec, 0x1a, 0xbb, 0xdf, 0x2f, 0x43, 0x49, 0x84, 0xb1, 0xb3, 0x84, 0x8e, 0x3f, + 0xf0, 0x0b, 0x5c, 0xbb, 0x85, 0x8b, 0x82, 0xdd, 0xb6, 0xd1, 0x26, 0x94, 0x25, 0x34, 0xf1, 0xd9, + 0x14, 0x08, 0x52, 0x87, 0x0d, 0xff, 0xab, 0x90, 0xdb, 0x77, 0x5c, 0x22, 0x37, 0x7a, 0xaa, 0x03, + 0x98, 0x19, 0x60, 0x7b, 0x01, 0x73, 0x74, 0xb3, 0x18, 0x15, 0xea, 0xf8, 0xf8, 0x64, 0xda, 0x99, + 0x1c, 0x9f, 0xc8, 0x40, 0xe7, 0xc6, 0x27, 0x70, 0x6c, 0x7c, 0x82, 0x2d, 0xc6, 0x27, 0xa1, 0xc9, + 0xf1, 0x09, 0xd2, 0x4f, 0x65, 0x7c, 0x3b, 0x70, 0xa3, 0xe9, 0x9a, 0xd6, 0xa1, 0xeb, 0x04, 0x21, + 0xb1, 0x93, 0x1e, 0xe3, 0x01, 0x14, 0xce, 0x05, 0x9d, 0x97, 0x55, 0x34, 0x25, 0x52, 0xfd, 0xf7, + 0x0c, 0x54, 0xb6, 0x89, 0xe9, 0x86, 0x07, 0xb3, 0xb2, 0x51, 0x48, 0x82, 0x50, 0x5e, 0x56, 0xfc, + 0x37, 0xfa, 0x1a, 0x14, 0xe3, 0x98, 0xe4, 0xca, 0xb7, 0xb9, 0x18, 0x8a, 0x1e, 0xc2, 0x12, 0x3b, + 0x63, 0x74, 0x12, 0x25, 0x3b, 0x97, 0x3d, 0xfb, 0x48, 0x24, 0xbb, 0x64, 0x7c, 0xc2, 0x83, 0x10, + 0xbe, 0x95, 0xf2, 0x38, 0x6a, 0xa2, 0xff, 0x0f, 0x15, 0xfe, 0x6a, 0x11, 0xc5, 0x5c, 0xf9, 0xab, + 0x74, 0x96, 0xc5, 0xc3, 0xa3, 0x88, 0xb7, 0xfe, 0x38, 0x0b, 0x6b, 0x4f, 0xcd, 0xe9, 0x1e, 0x91, + 0x6e, 0x83, 0xd8, 0x98, 0x58, 0xd4, 0xb7, 0x51, 0x2f, 0xe9, 0x6e, 0x2e, 0x79, 0xc7, 0x4c, 0x13, + 0x4e, 0xf7, 0x3a, 0x51, 0x02, 0x96, 0x4d, 0x24, 0x60, 0x6b, 0x90, 0xf7, 0xa8, 0x67, 0x11, 0xe9, + 0x8b, 0x44, 0x43, 0xfd, 0xed, 0x4c, 0xd2, 0xd7, 0x54, 0xe3, 0x37, 0x46, 0x5e, 0x81, 0xea, 0xd0, + 0x30, 0xee, 0x0e, 0x7d, 0x04, 0xd5, 0xbe, 0xae, 0x61, 0x7d, 0xd0, 0xec, 0x7e, 0xdb, 0xe8, 0x37, + 0x76, 0xfa, 0x8d, 0x07, 0xf7, 0x8c, 0x5e, 0x77, 0xe7, 0x3b, 0xf7, 0x1f, 0xde, 0xfb, 0x9a, 0x92, + 0xa9, 0xd6, 0x8e, 0x4f, 0x6a, 0xb7, 0x3a, 0x0d, 0x6d, 0x47, 0x1c, 0x99, 0x3d, 0xfa, 0xb2, 0x6f, + 0xba, 0x81, 0xf9, 0xe0, 0x5e, 0x8f, 0xba, 0x53, 0x86, 0x41, 0x5f, 0x01, 0xb4, 0xa5, 0xe3, 0x8e, + 0x3e, 0x30, 0x22, 0x87, 0xa6, 0x35, 0x35, 0x25, 0x2b, 0xd2, 0x9a, 0x2d, 0xe2, 0x7b, 0x24, 0x6c, + 0xe8, 0xfd, 0xfb, 0x0f, 0x1e, 0x69, 0x4d, 0x8d, 0x1d, 0x82, 0x4a, 0xf2, 0x76, 0x4b, 0x5e, 0xda, + 0x99, 0x0b, 0x2f, 0xed, 0xd9, 0xdd, 0x9f, 0xbd, 0xe0, 0xee, 0xdf, 0x82, 0x35, 0xcb, 0xa7, 0x41, + 0x60, 0xb0, 0x5c, 0x81, 0xd8, 0x73, 0xd9, 0xc8, 0x17, 0xce, 0x4e, 0x37, 0xaf, 0x69, 0x8c, 0xdf, + 0xe7, 0x6c, 0xa9, 0xfe, 0x9a, 0x95, 0x20, 0xf1, 0x9e, 0xd4, 0xdf, 0x5f, 0x64, 0x61, 0x97, 0x73, + 0xe4, 0xb8, 0x64, 0x48, 0x02, 0xf4, 0x0c, 0x56, 0x2d, 0x9f, 0xd8, 0x2c, 0x09, 0x30, 0xdd, 0xe4, + 0xc7, 0xba, 0xff, 0x2f, 0x35, 0x02, 0x8a, 0x05, 0xeb, 0x5a, 0x2c, 0xd5, 0x1f, 0x13, 0x0b, 0xaf, + 0x58, 
0xe7, 0xda, 0xe8, 0x63, 0x58, 0x0d, 0x88, 0xeb, 0x78, 0x93, 0x97, 0x86, 0x45, 0xbd, 0x90, + 0xbc, 0x8c, 0xde, 0xd6, 0xae, 0xd2, 0xdb, 0xd7, 0x77, 0x98, 0x94, 0x26, 0x84, 0x9a, 0xe8, 0xec, + 0x74, 0x73, 0xe5, 0x3c, 0x0d, 0xaf, 0x48, 0xcd, 0xb2, 0x5d, 0xed, 0xc0, 0xca, 0xf9, 0xd1, 0xa0, + 0x35, 0xe9, 0x29, 0xb8, 0xc3, 0x89, 0x3c, 0x01, 0xba, 0x05, 0x45, 0x9f, 0x0c, 0x9d, 0x20, 0xf4, + 0x85, 0x99, 0x19, 0x27, 0xa6, 0x30, 0x3f, 0x21, 0xbe, 0xa5, 0xaa, 0xfe, 0x12, 0xcc, 0xf5, 0xc8, + 0x8e, 0x96, 0xed, 0x04, 0xe6, 0x9e, 0x54, 0x59, 0xc4, 0x51, 0x93, 0xed, 0xd8, 0x49, 0x10, 0x87, + 0x75, 0xfc, 0x37, 0xa3, 0xf1, 0xf8, 0x43, 0x7e, 0x59, 0xc6, 0x23, 0x8c, 0xe8, 0x03, 0xd6, 0x5c, + 0xe2, 0x03, 0xd6, 0x35, 0xc8, 0xbb, 0xe4, 0x88, 0xb8, 0xe2, 0xe6, 0xc7, 0xa2, 0x71, 0xe7, 0x1e, + 0x54, 0xa2, 0x2f, 0x25, 0xf9, 0x37, 0x18, 0x45, 0xc8, 0x0d, 0x1a, 0xfd, 0x27, 0xca, 0x02, 0x02, + 0x28, 0x88, 0x9d, 0x2c, 0xde, 0xfd, 0xb4, 0x6e, 0x67, 0xab, 0xfd, 0x58, 0xc9, 0xde, 0xf9, 0x9d, + 0x1c, 0x94, 0xe2, 0x97, 0x27, 0x76, 0xd3, 0x74, 0xf4, 0xe7, 0xd1, 0x51, 0x88, 0xe9, 0x1d, 0xf2, + 0x02, 0xbd, 0x31, 0xab, 0x59, 0x7d, 0x24, 0x9e, 0xda, 0x63, 0x76, 0x54, 0xaf, 0x7a, 0x0b, 0x8a, + 0x8d, 0x7e, 0xbf, 0xfd, 0xb8, 0xa3, 0xb7, 0x94, 0x4f, 0x33, 0xd5, 0x2f, 0x1c, 0x9f, 0xd4, 0xae, + 0xc5, 0xa0, 0x46, 0x20, 0x36, 0x1f, 0x47, 0x69, 0x9a, 0xde, 0x1b, 0xe8, 0x2d, 0xe5, 0x93, 0xec, + 0x3c, 0x8a, 0xd7, 0x60, 0xf8, 0x47, 0x40, 0xa5, 0x1e, 0xd6, 0x7b, 0x0d, 0xcc, 0x3a, 0xfc, 0x34, + 0x2b, 0x4a, 0x69, 0xb3, 0x1e, 0x7d, 0x32, 0x36, 0x7d, 0xd6, 0xe7, 0x46, 0xf4, 0x55, 0xdd, 0x27, + 0x8b, 0xe2, 0x43, 0x91, 0xd9, 0x33, 0x1a, 0x31, 0xed, 0x29, 0xeb, 0x8d, 0xbf, 0x5f, 0x72, 0x35, + 0x8b, 0x73, 0xbd, 0xf5, 0x99, 0xa7, 0x62, 0x5a, 0x54, 0x58, 0xc2, 0xbb, 0x9d, 0x0e, 0x03, 0x7d, + 0x92, 0x9b, 0x9b, 0x1d, 0x9e, 0x78, 0x2c, 0xbf, 0x46, 0xb7, 0xa1, 0x18, 0x3d, 0x6f, 0x2a, 0x9f, + 0xe6, 0xe6, 0x06, 0xa4, 0x45, 0x6f, 0xb3, 0xbc, 0xc3, 0xed, 0xdd, 0x01, 0xff, 0xe8, 0xef, 0x93, + 0xfc, 0x7c, 0x87, 0x07, 0x93, 0xd0, 0xa6, 0x2f, 0x3c, 0x76, 0x66, 0x65, 0xd5, 0xee, 0xd3, 0xbc, + 0xf0, 0x05, 0x31, 0x46, 0x96, 0xec, 0xde, 0x82, 0x22, 0xd6, 0xbf, 0x25, 0xbe, 0x0f, 0xfc, 0xa4, + 0x30, 0xa7, 0x07, 0x93, 0x8f, 0x89, 0xc5, 0x7a, 0xab, 0x41, 0x01, 0xeb, 0x4f, 0xbb, 0xcf, 0x74, + 0xe5, 0x0f, 0x0a, 0x73, 0x7a, 0x30, 0x19, 0x51, 0xfe, 0x95, 0x54, 0xb1, 0x8b, 0x7b, 0xdb, 0x0d, + 0xbe, 0x28, 0xf3, 0x7a, 0xba, 0xfe, 0xf8, 0xc0, 0xf4, 0x88, 0x3d, 0xfb, 0x9e, 0x26, 0x66, 0xdd, + 0xf9, 0x39, 0x28, 0x46, 0x91, 0x2e, 0xda, 0x80, 0xc2, 0xf3, 0x2e, 0x7e, 0xa2, 0x63, 0x65, 0x41, + 0x58, 0x39, 0xe2, 0x3c, 0x17, 0x39, 0x4a, 0x0d, 0x96, 0x9e, 0x36, 0x3a, 0x8d, 0xc7, 0x3a, 0x8e, + 0x4a, 0xee, 0x11, 0x40, 0x86, 0x6b, 0x55, 0x45, 0x76, 0x10, 0xeb, 0x6c, 0xae, 0x7f, 0xff, 0x47, + 0x1b, 0x0b, 0x3f, 0xfc, 0xd1, 0xc6, 0xc2, 0x27, 0x67, 0x1b, 0x99, 0xef, 0x9f, 0x6d, 0x64, 0xfe, + 0xfe, 0x6c, 0x23, 0xf3, 0x6f, 0x67, 0x1b, 0x99, 0xbd, 0x02, 0xbf, 0x54, 0x1e, 0xfe, 0x6f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x80, 0x94, 0x1b, 0x9c, 0x7a, 0x32, 0x00, 0x00, } diff --git a/vendor/github.com/docker/swarmkit/api/watch.pb.go b/vendor/github.com/docker/swarmkit/api/watch.pb.go index 78f0014c7..9d152514c 100644 --- a/vendor/github.com/docker/swarmkit/api/watch.pb.go +++ b/vendor/github.com/docker/swarmkit/api/watch.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/docker/swarmkit/api/watch.proto -// DO NOT EDIT! 
package api @@ -10,18 +9,16 @@ import math "math" import _ "github.com/gogo/protobuf/gogoproto" import _ "github.com/docker/swarmkit/protobuf/plugin" -import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" +import deepcopy "github.com/docker/swarmkit/api/deepcopy" -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" import raftselector "github.com/docker/swarmkit/manager/raftselector" import codes "google.golang.org/grpc/codes" import status "google.golang.org/grpc/status" import metadata "google.golang.org/grpc/metadata" -import transport "google.golang.org/grpc/transport" +import peer "google.golang.org/grpc/peer" import rafttime "time" import strings "strings" @@ -1049,55 +1046,55 @@ func (m *Object) CopyFrom(src interface{}) { v := Object_Node{ Node: &Node{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Node, o.GetNode()) + deepcopy.Copy(v.Node, o.GetNode()) m.Object = &v case *Object_Service: v := Object_Service{ Service: &Service{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Service, o.GetService()) + deepcopy.Copy(v.Service, o.GetService()) m.Object = &v case *Object_Network: v := Object_Network{ Network: &Network{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Network, o.GetNetwork()) + deepcopy.Copy(v.Network, o.GetNetwork()) m.Object = &v case *Object_Task: v := Object_Task{ Task: &Task{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Task, o.GetTask()) + deepcopy.Copy(v.Task, o.GetTask()) m.Object = &v case *Object_Cluster: v := Object_Cluster{ Cluster: &Cluster{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Cluster, o.GetCluster()) + deepcopy.Copy(v.Cluster, o.GetCluster()) m.Object = &v case *Object_Secret: v := Object_Secret{ Secret: &Secret{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Secret, o.GetSecret()) + deepcopy.Copy(v.Secret, o.GetSecret()) m.Object = &v case *Object_Resource: v := Object_Resource{ Resource: &Resource{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Resource, o.GetResource()) + deepcopy.Copy(v.Resource, o.GetResource()) m.Object = &v case *Object_Extension: v := Object_Extension{ Extension: &Extension{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Extension, o.GetExtension()) + deepcopy.Copy(v.Extension, o.GetExtension()) m.Object = &v case *Object_Config: v := Object_Config{ Config: &Config{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig()) + deepcopy.Copy(v.Config, o.GetConfig()) m.Object = &v } } @@ -1173,13 +1170,13 @@ func (m *SelectBy) CopyFrom(src interface{}) { v := SelectBy_Custom{ Custom: &SelectByCustom{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Custom, o.GetCustom()) + deepcopy.Copy(v.Custom, o.GetCustom()) m.By = &v case *SelectBy_CustomPrefix: v := SelectBy_CustomPrefix{ CustomPrefix: &SelectByCustom{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.CustomPrefix, o.GetCustomPrefix()) + deepcopy.Copy(v.CustomPrefix, o.GetCustomPrefix()) m.By = &v case *SelectBy_ServiceID: v := SelectBy_ServiceID{ @@ -1195,7 +1192,7 @@ func (m *SelectBy) CopyFrom(src interface{}) { v := SelectBy_Slot{ Slot: &SelectBySlot{}, } - github_com_docker_swarmkit_api_deepcopy.Copy(v.Slot, o.GetSlot()) + deepcopy.Copy(v.Slot, o.GetSlot()) m.By = &v case *SelectBy_DesiredState: v := SelectBy_DesiredState{ @@ -1254,13 +1251,13 @@ func (m *WatchRequest) CopyFrom(src interface{}) { m.Entries = make([]*WatchRequest_WatchEntry, 
len(o.Entries)) for i := range m.Entries { m.Entries[i] = &WatchRequest_WatchEntry{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Entries[i], o.Entries[i]) + deepcopy.Copy(m.Entries[i], o.Entries[i]) } } if o.ResumeFrom != nil { m.ResumeFrom = &Version{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.ResumeFrom, o.ResumeFrom) + deepcopy.Copy(m.ResumeFrom, o.ResumeFrom) } } @@ -1281,7 +1278,7 @@ func (m *WatchRequest_WatchEntry) CopyFrom(src interface{}) { m.Filters = make([]*SelectBy, len(o.Filters)) for i := range m.Filters { m.Filters[i] = &SelectBy{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters[i], o.Filters[i]) + deepcopy.Copy(m.Filters[i], o.Filters[i]) } } @@ -1304,13 +1301,13 @@ func (m *WatchMessage) CopyFrom(src interface{}) { m.Events = make([]*WatchMessage_Event, len(o.Events)) for i := range m.Events { m.Events[i] = &WatchMessage_Event{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Events[i], o.Events[i]) + deepcopy.Copy(m.Events[i], o.Events[i]) } } if o.Version != nil { m.Version = &Version{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Version, o.Version) + deepcopy.Copy(m.Version, o.Version) } } @@ -1329,11 +1326,11 @@ func (m *WatchMessage_Event) CopyFrom(src interface{}) { *m = *o if o.Object != nil { m.Object = &Object{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.Object, o.Object) + deepcopy.Copy(m.Object, o.Object) } if o.OldObject != nil { m.OldObject = &Object{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.OldObject, o.OldObject) + deepcopy.Copy(m.OldObject, o.OldObject) } } @@ -2006,24 +2003,6 @@ func (m *WatchMessage_Event) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Watch(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Watch(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintWatch(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2042,12 +2021,12 @@ type raftProxyWatchServer struct { func NewRaftProxyWatchServer(local WatchServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) WatchServer { redirectChecker := func(ctx context.Context) (context.Context, error) { - s, ok := transport.StreamFromContext(ctx) + p, ok := peer.FromContext(ctx) if !ok { return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") } - addr := s.ServerTransport().RemoteAddr().String() - md, ok := metadata.FromContext(ctx) + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } @@ -2055,7 +2034,7 @@ func NewRaftProxyWatchServer(local WatchServer, connSelector raftselector.ConnPr md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) - return metadata.NewContext(ctx, md), nil + return metadata.NewOutgoingContext(ctx, md), nil } remoteMods := []func(context.Context) (context.Context, error){redirectChecker} remoteMods = append(remoteMods, remoteCtxMod) 
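For reference, the redirectChecker hunk above maps the old grpc-go helpers onto their current equivalents: peer.FromContext replaces transport.StreamFromContext for reading the caller address, and metadata.FromIncomingContext / metadata.NewOutgoingContext replace the single FromContext / NewContext pair. A minimal standalone sketch of the same pattern follows; the package and function names are illustrative only (not part of this patch), and the md.Copy() call is a defensive addition, assuming current google.golang.org/grpc peer and metadata packages.

package redirectcheck

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
)

// annotateRedirect mirrors the redirectChecker pattern: read the caller
// address from the peer info, reject a second redirect, and record the
// address in the outgoing "redirect" metadata.
func annotateRedirect(ctx context.Context) (context.Context, error) {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return ctx, fmt.Errorf("remote addr is not found in context")
	}
	addr := p.Addr.String()

	md, ok := metadata.FromIncomingContext(ctx)
	if ok && len(md["redirect"]) != 0 {
		return ctx, fmt.Errorf("more than one redirect to leader from: %s", md["redirect"])
	}
	if !ok {
		md = metadata.New(map[string]string{})
	}
	// Copy before mutating; incoming metadata should be treated as read-only.
	md = md.Copy()
	md["redirect"] = append(md["redirect"], addr)
	return metadata.NewOutgoingContext(ctx, md), nil
}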
diff --git a/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go b/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go index 549cebaea..0d08eb6eb 100644 --- a/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go +++ b/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/docker/swarmkit/protobuf/plugin/plugin.proto -// DO NOT EDIT! /* Package plugin is a generated protocol buffer package. @@ -20,9 +19,7 @@ import fmt "fmt" import math "math" import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" -import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" - -import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import deepcopy "github.com/docker/swarmkit/api/deepcopy" import strings "strings" import reflect "reflect" @@ -154,7 +151,7 @@ func (m *StoreObject) CopyFrom(src interface{}) { *m = *o if o.WatchSelectors != nil { m.WatchSelectors = &WatchSelectors{} - github_com_docker_swarmkit_api_deepcopy.Copy(m.WatchSelectors, o.WatchSelectors) + deepcopy.Copy(m.WatchSelectors, o.WatchSelectors) } } @@ -345,7 +342,7 @@ func (m *StoreObject) MarshalTo(dAtA []byte) (int, error) { var l int _ = l if m.WatchSelectors == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("watch_selectors") + return 0, proto.NewRequiredNotSetError("watch_selectors") } else { dAtA[i] = 0xa i++ @@ -408,24 +405,6 @@ func (m *TLSAuthorization) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -987,7 +966,7 @@ func (m *StoreObject) Unmarshal(dAtA []byte) error { } } if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("watch_selectors") + return proto.NewRequiredNotSetError("watch_selectors") } if iNdEx > l { diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go new file mode 100644 index 000000000..e474cd075 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -0,0 +1,223 @@ +package lru + +import ( + "fmt" + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // Default2QRecentRatio is the ratio of the 2Q cache dedicated + // to recently added entries that have only been accessed once. + Default2QRecentRatio = 0.25 + + // Default2QGhostEntries is the default ratio of ghost + // entries kept to track entries recently evicted + Default2QGhostEntries = 0.50 +) + +// TwoQueueCache is a thread-safe fixed size 2Q cache. +// 2Q is an enhancement over the standard LRU cache +// in that it tracks both frequently and recently used +// entries separately. This avoids a burst in access to new +// entries from evicting frequently used entries. 
It adds some +// additional tracking overhead to the standard LRU cache, and is +// computationally about 2x the cost, and adds some metadata over +// head. The ARCCache is similar, but does not require setting any +// parameters. +type TwoQueueCache struct { + size int + recentSize int + + recent simplelru.LRUCache + frequent simplelru.LRUCache + recentEvict simplelru.LRUCache + lock sync.RWMutex +} + +// New2Q creates a new TwoQueueCache using the default +// values for the parameters. +func New2Q(size int) (*TwoQueueCache, error) { + return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) +} + +// New2QParams creates a new TwoQueueCache using the provided +// parameter values. +func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size") + } + if recentRatio < 0.0 || recentRatio > 1.0 { + return nil, fmt.Errorf("invalid recent ratio") + } + if ghostRatio < 0.0 || ghostRatio > 1.0 { + return nil, fmt.Errorf("invalid ghost ratio") + } + + // Determine the sub-sizes + recentSize := int(float64(size) * recentRatio) + evictSize := int(float64(size) * ghostRatio) + + // Allocate the LRUs + recent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + frequent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + recentEvict, err := simplelru.NewLRU(evictSize, nil) + if err != nil { + return nil, err + } + + // Initialize the cache + c := &TwoQueueCache{ + size: size, + recentSize: recentSize, + recent: recent, + frequent: frequent, + recentEvict: recentEvict, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if this is a frequent value + if val, ok := c.frequent.Get(key); ok { + return val, ok + } + + // If the value is contained in recent, then we + // promote it to frequent + if val, ok := c.recent.Peek(key); ok { + c.recent.Remove(key) + c.frequent.Add(key, val) + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. 
+func (c *TwoQueueCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is frequently used already, + // and just update the value + if c.frequent.Contains(key) { + c.frequent.Add(key, value) + return + } + + // Check if the value is recently used, and promote + // the value into the frequent list + if c.recent.Contains(key) { + c.recent.Remove(key) + c.frequent.Add(key, value) + return + } + + // If the value was recently evicted, add it to the + // frequently used list + if c.recentEvict.Contains(key) { + c.ensureSpace(true) + c.recentEvict.Remove(key) + c.frequent.Add(key, value) + return + } + + // Add to the recently seen list + c.ensureSpace(false) + c.recent.Add(key, value) + return +} + +// ensureSpace is used to ensure we have space in the cache +func (c *TwoQueueCache) ensureSpace(recentEvict bool) { + // If we have space, nothing to do + recentLen := c.recent.Len() + freqLen := c.frequent.Len() + if recentLen+freqLen < c.size { + return + } + + // If the recent buffer is larger than + // the target, evict from there + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { + k, _, _ := c.recent.RemoveOldest() + c.recentEvict.Add(k, nil) + return + } + + // Remove from the frequent list otherwise + c.frequent.RemoveOldest() +} + +// Len returns the number of items in the cache. +func (c *TwoQueueCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.recent.Len() + c.frequent.Len() +} + +// Keys returns a slice of the keys in the cache. +// The frequently used keys are first in the returned slice. +func (c *TwoQueueCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.frequent.Keys() + k2 := c.recent.Keys() + return append(k1, k2...) +} + +// Remove removes the provided key from the cache. +func (c *TwoQueueCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.frequent.Remove(key) { + return + } + if c.recent.Remove(key) { + return + } + if c.recentEvict.Remove(key) { + return + } +} + +// Purge is used to completely clear the cache. +func (c *TwoQueueCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.recent.Purge() + c.frequent.Purge() + c.recentEvict.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *TwoQueueCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.frequent.Contains(key) || c.recent.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.frequent.Peek(key); ok { + return val, ok + } + return c.recent.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 000000000..be2cc4dfb --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go new file mode 100644 index 000000000..555225a21 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/arc.go @@ -0,0 +1,257 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). +// ARC is an enhancement over the standard LRU cache in that tracks both +// frequency and recency of use. This avoids a burst in access to new +// entries from evicting the frequently used older entries. It adds some +// additional tracking overhead to a standard LRU cache, computationally +// it is roughly 2x the cost, and the extra memory overhead is linear +// with the size of the cache. ARC has been patented by IBM, but is +// similar to the TwoQueueCache (2Q) which requires setting parameters. +type ARCCache struct { + size int // Size is the total capacity of the cache + p int // P is the dynamic preference towards T1 or T2 + + t1 simplelru.LRUCache // T1 is the LRU for recently accessed items + b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 + + t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items + b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 + + lock sync.RWMutex +} + +// NewARC creates an ARC of the given size +func NewARC(size int) (*ARCCache, error) { + // Create the sub LRUs + b1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + b2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + + // Initialize the ARC + c := &ARCCache{ + size: size, + p: 0, + t1: t1, + b1: b1, + t2: t2, + b2: b2, + } + return c, nil +} + +// Get looks up a key's value from the cache. 
+func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // If the value is contained in T1 (recent), then + // promote it to T2 (frequent) + if val, ok := c.t1.Peek(key); ok { + c.t1.Remove(key) + c.t2.Add(key, val) + return val, ok + } + + // Check if the value is contained in T2 (frequent) + if val, ok := c.t2.Get(key); ok { + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. +func (c *ARCCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is contained in T1 (recent), and potentially + // promote it to frequent T2 + if c.t1.Contains(key) { + c.t1.Remove(key) + c.t2.Add(key, value) + return + } + + // Check if the value is already in T2 (frequent) and update it + if c.t2.Contains(key) { + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // recently used list + if c.b1.Contains(key) { + // T1 set is too small, increase P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b2Len > b1Len { + delta = b2Len / b1Len + } + if c.p+delta >= c.size { + c.p = c.size + } else { + c.p += delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Remove from B1 + c.b1.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // frequently used list + if c.b2.Contains(key) { + // T2 set is too small, decrease P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b1Len > b2Len { + delta = b1Len / b2Len + } + if delta >= c.p { + c.p = 0 + } else { + c.p -= delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(true) + } + + // Remove from B2 + c.b2.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Keep the size of the ghost buffers trim + if c.b1.Len() > c.size-c.p { + c.b1.RemoveOldest() + } + if c.b2.Len() > c.p { + c.b2.RemoveOldest() + } + + // Add to the recently seen list + c.t1.Add(key, value) + return +} + +// replace is used to adaptively evict from either T1 or T2 +// based on the current learned value of P +func (c *ARCCache) replace(b2ContainsKey bool) { + t1Len := c.t1.Len() + if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { + k, _, ok := c.t1.RemoveOldest() + if ok { + c.b1.Add(k, nil) + } + } else { + k, _, ok := c.t2.RemoveOldest() + if ok { + c.b2.Add(k, nil) + } + } +} + +// Len returns the number of cached entries +func (c *ARCCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Len() + c.t2.Len() +} + +// Keys returns all the cached keys +func (c *ARCCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.t1.Keys() + k2 := c.t2.Keys() + return append(k1, k2...) 
+} + +// Remove is used to purge a key from the cache +func (c *ARCCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.t1.Remove(key) { + return + } + if c.t2.Remove(key) { + return + } + if c.b1.Remove(key) { + return + } + if c.b2.Remove(key) { + return + } +} + +// Purge is used to clear the cache +func (c *ARCCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.t1.Purge() + c.t2.Purge() + c.b1.Purge() + c.b2.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *ARCCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Contains(key) || c.t2.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.t1.Peek(key); ok { + return val, ok + } + return c.t2.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go new file mode 100644 index 000000000..2547df979 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/doc.go @@ -0,0 +1,21 @@ +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, +// at the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as +// well as recent usage in both the frequent and recent caches. Its +// computational overhead is comparable to TwoQueueCache, but the memory +// overhead is linear with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go new file mode 100644 index 000000000..c8d9b0a23 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -0,0 +1,110 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// Cache is a thread-safe fixed size LRU cache. +type Cache struct { + lru simplelru.LRUCache + lock sync.RWMutex +} + +// New creates an LRU of the given size. +func New(size int) (*Cache, error) { + return NewWithEvict(size, nil) +} + +// NewWithEvict constructs a fixed size cache with the given eviction +// callback. +func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { + lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) + if err != nil { + return nil, err + } + c := &Cache{ + lru: lru, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *Cache) Purge() { + c.lock.Lock() + c.lru.Purge() + c.lock.Unlock() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *Cache) Add(key, value interface{}) (evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Add(key, value) +} + +// Get looks up a key's value from the cache. 
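+//
+// A hypothetical sketch of the eviction callback, as calling code might use
+// it (the size of 2 and the keys are arbitrary; errors are ignored only for
+// brevity):
+//
+//	cache, _ := lru.NewWithEvict(2, func(k, v interface{}) {
+//		fmt.Printf("evicted %v\n", k)
+//	})
+//	cache.Add("a", 1)
+//	cache.Add("b", 2)
+//	cache.Add("c", 3) // evicts "a", the least recently used entry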
+func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Get(key) +} + +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. +func (c *Cache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Contains(key) +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Peek(key) +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.lru.Contains(key) { + return true, false + } + evicted = c.lru.Add(key, value) + return false, evicted +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key interface{}) { + c.lock.Lock() + c.lru.Remove(key) + c.lock.Unlock() +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + c.lock.Lock() + c.lru.RemoveOldest() + c.lock.Unlock() +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Keys() +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Len() +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 000000000..5673773b2 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,161 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. 
+func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value.(*entry).value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + var ent *list.Element + if ent, ok = c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 000000000..744cac01c --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,37 @@ +package simplelru + + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. 
#value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Check if a key exsists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. + Len() int + + // Clear all cache entries + Purge() +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE new file mode 100644 index 000000000..5d8cb5b72 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE @@ -0,0 +1 @@ +Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 000000000..258c0636a --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). +func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + var headerBuf [binary.MaxVarintLen32]byte + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. + if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... + continue + } + bytesRead += newBytesRead + // Now present everything read so far to the varint decoder and + // see if a varint can be decoded already. + messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + } + + messageBuf := make([]byte, messageLength) + newBytesRead, err := io.ReadFull(r, messageBuf) + bytesRead += newBytesRead + if err != nil { + return bytesRead, err + } + + return bytesRead, proto.Unmarshal(messageBuf, m) +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go new file mode 100644 index 000000000..c318385cb --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pbutil provides record length-delimited Protocol Buffer streaming. +package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go new file mode 100644 index 000000000..8fb59ad22 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go @@ -0,0 +1,46 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "io" + + "github.com/golang/protobuf/proto" +) + +// WriteDelimited encodes and dumps a message to the provided writer prefixed +// with a 32-bit varint indicating the length of the encoded message, producing +// a length-delimited record stream, which can be used to chain together +// encoded messages of the same type together in a file. It returns the total +// number of bytes written and any applicable error. This is roughly +// equivalent to the companion Java API's MessageLite#writeDelimitedTo. +func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + var buf [binary.MaxVarintLen32]byte + encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff --git a/vendor/github.com/morikuni/aec/LICENSE b/vendor/github.com/morikuni/aec/LICENSE new file mode 100644 index 000000000..1c2640164 --- /dev/null +++ b/vendor/github.com/morikuni/aec/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Taihei Morikuni + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/morikuni/aec/aec.go b/vendor/github.com/morikuni/aec/aec.go new file mode 100644 index 000000000..566be6eb1 --- /dev/null +++ b/vendor/github.com/morikuni/aec/aec.go @@ -0,0 +1,137 @@ +package aec + +import "fmt" + +// EraseMode is listed in a variable EraseModes. +type EraseMode uint + +var ( + // EraseModes is a list of EraseMode. + EraseModes struct { + // All erase all. + All EraseMode + + // Head erase to head. + Head EraseMode + + // Tail erase to tail. + Tail EraseMode + } + + // Save saves the cursor position. + Save ANSI + + // Restore restores the cursor position. + Restore ANSI + + // Hide hides the cursor. + Hide ANSI + + // Show shows the cursor. + Show ANSI + + // Report reports the cursor position. + Report ANSI +) + +// Up moves up the cursor. +func Up(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dA", n)) +} + +// Down moves down the cursor. +func Down(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dB", n)) +} + +// Right moves right the cursor. +func Right(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dC", n)) +} + +// Left moves left the cursor. +func Left(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dD", n)) +} + +// NextLine moves down the cursor to head of a line. +func NextLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dE", n)) +} + +// PreviousLine moves up the cursor to head of a line. +func PreviousLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dF", n)) +} + +// Column set the cursor position to a given column. +func Column(col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dG", col)) +} + +// Position set the cursor position to a given absolute position. +func Position(row, col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%d;%dH", row, col)) +} + +// EraseDisplay erases display by given EraseMode. +func EraseDisplay(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dJ", m)) +} + +// EraseLine erases lines by given EraseMode. +func EraseLine(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dK", m)) +} + +// ScrollUp scrolls up the page. +func ScrollUp(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dS", n)) +} + +// ScrollDown scrolls down the page. +func ScrollDown(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dT", n)) +} + +func init() { + EraseModes = struct { + All EraseMode + Head EraseMode + Tail EraseMode + }{ + Tail: 0, + Head: 1, + All: 2, + } + + Save = newAnsi(esc + "s") + Restore = newAnsi(esc + "u") + Hide = newAnsi(esc + "?25l") + Show = newAnsi(esc + "?25h") + Report = newAnsi(esc + "6n") +} diff --git a/vendor/github.com/morikuni/aec/ansi.go b/vendor/github.com/morikuni/aec/ansi.go new file mode 100644 index 000000000..e60722e6e --- /dev/null +++ b/vendor/github.com/morikuni/aec/ansi.go @@ -0,0 +1,59 @@ +package aec + +import ( + "fmt" + "strings" +) + +const esc = "\x1b[" + +// Reset resets SGR effect. +const Reset string = "\x1b[0m" + +var empty = newAnsi("") + +// ANSI represents ANSI escape code. +type ANSI interface { + fmt.Stringer + + // With adapts given ANSIs. + With(...ANSI) ANSI + + // Apply wraps given string in ANSI. 
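+	//
+	// A hypothetical sketch of composing and applying codes from calling
+	// code (the text is arbitrary):
+	//
+	//	warn := aec.YellowF.With(aec.Bold)
+	//	fmt.Println(warn.Apply("low disk space"))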
+ Apply(string) string +} + +type ansiImpl string + +func newAnsi(s string) *ansiImpl { + r := ansiImpl(s) + return &r +} + +func (a *ansiImpl) With(ansi ...ANSI) ANSI { + return concat(append([]ANSI{a}, ansi...)) +} + +func (a *ansiImpl) Apply(s string) string { + return a.String() + s + Reset +} + +func (a *ansiImpl) String() string { + return string(*a) +} + +// Apply wraps given string in ANSIs. +func Apply(s string, ansi ...ANSI) string { + if len(ansi) == 0 { + return s + } + return concat(ansi).Apply(s) +} + +func concat(ansi []ANSI) ANSI { + strs := make([]string, 0, len(ansi)) + for _, p := range ansi { + strs = append(strs, p.String()) + } + return newAnsi(strings.Join(strs, "")) +} diff --git a/vendor/github.com/morikuni/aec/builder.go b/vendor/github.com/morikuni/aec/builder.go new file mode 100644 index 000000000..13bd002d4 --- /dev/null +++ b/vendor/github.com/morikuni/aec/builder.go @@ -0,0 +1,388 @@ +package aec + +// Builder is a lightweight syntax to construct customized ANSI. +type Builder struct { + ANSI ANSI +} + +// EmptyBuilder is an initialized Builder. +var EmptyBuilder *Builder + +// NewBuilder creates a Builder from existing ANSI. +func NewBuilder(a ...ANSI) *Builder { + return &Builder{concat(a)} +} + +// With is a syntax for With. +func (builder *Builder) With(a ...ANSI) *Builder { + return NewBuilder(builder.ANSI.With(a...)) +} + +// Up is a syntax for Up. +func (builder *Builder) Up(n uint) *Builder { + return builder.With(Up(n)) +} + +// Down is a syntax for Down. +func (builder *Builder) Down(n uint) *Builder { + return builder.With(Down(n)) +} + +// Right is a syntax for Right. +func (builder *Builder) Right(n uint) *Builder { + return builder.With(Right(n)) +} + +// Left is a syntax for Left. +func (builder *Builder) Left(n uint) *Builder { + return builder.With(Left(n)) +} + +// NextLine is a syntax for NextLine. +func (builder *Builder) NextLine(n uint) *Builder { + return builder.With(NextLine(n)) +} + +// PreviousLine is a syntax for PreviousLine. +func (builder *Builder) PreviousLine(n uint) *Builder { + return builder.With(PreviousLine(n)) +} + +// Column is a syntax for Column. +func (builder *Builder) Column(col uint) *Builder { + return builder.With(Column(col)) +} + +// Position is a syntax for Position. +func (builder *Builder) Position(row, col uint) *Builder { + return builder.With(Position(row, col)) +} + +// EraseDisplay is a syntax for EraseDisplay. +func (builder *Builder) EraseDisplay(m EraseMode) *Builder { + return builder.With(EraseDisplay(m)) +} + +// EraseLine is a syntax for EraseLine. +func (builder *Builder) EraseLine(m EraseMode) *Builder { + return builder.With(EraseLine(m)) +} + +// ScrollUp is a syntax for ScrollUp. +func (builder *Builder) ScrollUp(n int) *Builder { + return builder.With(ScrollUp(n)) +} + +// ScrollDown is a syntax for ScrollDown. +func (builder *Builder) ScrollDown(n int) *Builder { + return builder.With(ScrollDown(n)) +} + +// Save is a syntax for Save. +func (builder *Builder) Save() *Builder { + return builder.With(Save) +} + +// Restore is a syntax for Restore. +func (builder *Builder) Restore() *Builder { + return builder.With(Restore) +} + +// Hide is a syntax for Hide. +func (builder *Builder) Hide() *Builder { + return builder.With(Hide) +} + +// Show is a syntax for Show. +func (builder *Builder) Show() *Builder { + return builder.With(Show) +} + +// Report is a syntax for Report. +func (builder *Builder) Report() *Builder { + return builder.With(Report) +} + +// Bold is a syntax for Bold. 
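+//
+// A hypothetical sketch of chaining the builder from calling code (the
+// message is arbitrary):
+//
+//	styled := aec.EmptyBuilder.Bold().RedF().ANSI
+//	fmt.Println(styled.Apply("something went wrong"))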
+func (builder *Builder) Bold() *Builder { + return builder.With(Bold) +} + +// Faint is a syntax for Faint. +func (builder *Builder) Faint() *Builder { + return builder.With(Faint) +} + +// Italic is a syntax for Italic. +func (builder *Builder) Italic() *Builder { + return builder.With(Italic) +} + +// Underline is a syntax for Underline. +func (builder *Builder) Underline() *Builder { + return builder.With(Underline) +} + +// BlinkSlow is a syntax for BlinkSlow. +func (builder *Builder) BlinkSlow() *Builder { + return builder.With(BlinkSlow) +} + +// BlinkRapid is a syntax for BlinkRapid. +func (builder *Builder) BlinkRapid() *Builder { + return builder.With(BlinkRapid) +} + +// Inverse is a syntax for Inverse. +func (builder *Builder) Inverse() *Builder { + return builder.With(Inverse) +} + +// Conceal is a syntax for Conceal. +func (builder *Builder) Conceal() *Builder { + return builder.With(Conceal) +} + +// CrossOut is a syntax for CrossOut. +func (builder *Builder) CrossOut() *Builder { + return builder.With(CrossOut) +} + +// BlackF is a syntax for BlackF. +func (builder *Builder) BlackF() *Builder { + return builder.With(BlackF) +} + +// RedF is a syntax for RedF. +func (builder *Builder) RedF() *Builder { + return builder.With(RedF) +} + +// GreenF is a syntax for GreenF. +func (builder *Builder) GreenF() *Builder { + return builder.With(GreenF) +} + +// YellowF is a syntax for YellowF. +func (builder *Builder) YellowF() *Builder { + return builder.With(YellowF) +} + +// BlueF is a syntax for BlueF. +func (builder *Builder) BlueF() *Builder { + return builder.With(BlueF) +} + +// MagentaF is a syntax for MagentaF. +func (builder *Builder) MagentaF() *Builder { + return builder.With(MagentaF) +} + +// CyanF is a syntax for CyanF. +func (builder *Builder) CyanF() *Builder { + return builder.With(CyanF) +} + +// WhiteF is a syntax for WhiteF. +func (builder *Builder) WhiteF() *Builder { + return builder.With(WhiteF) +} + +// DefaultF is a syntax for DefaultF. +func (builder *Builder) DefaultF() *Builder { + return builder.With(DefaultF) +} + +// BlackB is a syntax for BlackB. +func (builder *Builder) BlackB() *Builder { + return builder.With(BlackB) +} + +// RedB is a syntax for RedB. +func (builder *Builder) RedB() *Builder { + return builder.With(RedB) +} + +// GreenB is a syntax for GreenB. +func (builder *Builder) GreenB() *Builder { + return builder.With(GreenB) +} + +// YellowB is a syntax for YellowB. +func (builder *Builder) YellowB() *Builder { + return builder.With(YellowB) +} + +// BlueB is a syntax for BlueB. +func (builder *Builder) BlueB() *Builder { + return builder.With(BlueB) +} + +// MagentaB is a syntax for MagentaB. +func (builder *Builder) MagentaB() *Builder { + return builder.With(MagentaB) +} + +// CyanB is a syntax for CyanB. +func (builder *Builder) CyanB() *Builder { + return builder.With(CyanB) +} + +// WhiteB is a syntax for WhiteB. +func (builder *Builder) WhiteB() *Builder { + return builder.With(WhiteB) +} + +// DefaultB is a syntax for DefaultB. +func (builder *Builder) DefaultB() *Builder { + return builder.With(DefaultB) +} + +// Frame is a syntax for Frame. +func (builder *Builder) Frame() *Builder { + return builder.With(Frame) +} + +// Encircle is a syntax for Encircle. +func (builder *Builder) Encircle() *Builder { + return builder.With(Encircle) +} + +// Overline is a syntax for Overline. +func (builder *Builder) Overline() *Builder { + return builder.With(Overline) +} + +// LightBlackF is a syntax for LightBlueF. 
+func (builder *Builder) LightBlackF() *Builder { + return builder.With(LightBlackF) +} + +// LightRedF is a syntax for LightRedF. +func (builder *Builder) LightRedF() *Builder { + return builder.With(LightRedF) +} + +// LightGreenF is a syntax for LightGreenF. +func (builder *Builder) LightGreenF() *Builder { + return builder.With(LightGreenF) +} + +// LightYellowF is a syntax for LightYellowF. +func (builder *Builder) LightYellowF() *Builder { + return builder.With(LightYellowF) +} + +// LightBlueF is a syntax for LightBlueF. +func (builder *Builder) LightBlueF() *Builder { + return builder.With(LightBlueF) +} + +// LightMagentaF is a syntax for LightMagentaF. +func (builder *Builder) LightMagentaF() *Builder { + return builder.With(LightMagentaF) +} + +// LightCyanF is a syntax for LightCyanF. +func (builder *Builder) LightCyanF() *Builder { + return builder.With(LightCyanF) +} + +// LightWhiteF is a syntax for LightWhiteF. +func (builder *Builder) LightWhiteF() *Builder { + return builder.With(LightWhiteF) +} + +// LightBlackB is a syntax for LightBlackB. +func (builder *Builder) LightBlackB() *Builder { + return builder.With(LightBlackB) +} + +// LightRedB is a syntax for LightRedB. +func (builder *Builder) LightRedB() *Builder { + return builder.With(LightRedB) +} + +// LightGreenB is a syntax for LightGreenB. +func (builder *Builder) LightGreenB() *Builder { + return builder.With(LightGreenB) +} + +// LightYellowB is a syntax for LightYellowB. +func (builder *Builder) LightYellowB() *Builder { + return builder.With(LightYellowB) +} + +// LightBlueB is a syntax for LightBlueB. +func (builder *Builder) LightBlueB() *Builder { + return builder.With(LightBlueB) +} + +// LightMagentaB is a syntax for LightMagentaB. +func (builder *Builder) LightMagentaB() *Builder { + return builder.With(LightMagentaB) +} + +// LightCyanB is a syntax for LightCyanB. +func (builder *Builder) LightCyanB() *Builder { + return builder.With(LightCyanB) +} + +// LightWhiteB is a syntax for LightWhiteB. +func (builder *Builder) LightWhiteB() *Builder { + return builder.With(LightWhiteB) +} + +// Color3BitF is a syntax for Color3BitF. +func (builder *Builder) Color3BitF(c RGB3Bit) *Builder { + return builder.With(Color3BitF(c)) +} + +// Color3BitB is a syntax for Color3BitB. +func (builder *Builder) Color3BitB(c RGB3Bit) *Builder { + return builder.With(Color3BitB(c)) +} + +// Color8BitF is a syntax for Color8BitF. +func (builder *Builder) Color8BitF(c RGB8Bit) *Builder { + return builder.With(Color8BitF(c)) +} + +// Color8BitB is a syntax for Color8BitB. +func (builder *Builder) Color8BitB(c RGB8Bit) *Builder { + return builder.With(Color8BitB(c)) +} + +// FullColorF is a syntax for FullColorF. +func (builder *Builder) FullColorF(r, g, b uint8) *Builder { + return builder.With(FullColorF(r, g, b)) +} + +// FullColorB is a syntax for FullColorB. +func (builder *Builder) FullColorB(r, g, b uint8) *Builder { + return builder.With(FullColorB(r, g, b)) +} + +// RGB3BitF is a syntax for Color3BitF with NewRGB3Bit. +func (builder *Builder) RGB3BitF(r, g, b uint8) *Builder { + return builder.Color3BitF(NewRGB3Bit(r, g, b)) +} + +// RGB3BitB is a syntax for Color3BitB with NewRGB3Bit. +func (builder *Builder) RGB3BitB(r, g, b uint8) *Builder { + return builder.Color3BitB(NewRGB3Bit(r, g, b)) +} + +// RGB8BitF is a syntax for Color8BitF with NewRGB8Bit. +func (builder *Builder) RGB8BitF(r, g, b uint8) *Builder { + return builder.Color8BitF(NewRGB8Bit(r, g, b)) +} + +// RGB8BitB is a syntax for Color8BitB with NewRGB8Bit. 
+func (builder *Builder) RGB8BitB(r, g, b uint8) *Builder { + return builder.Color8BitB(NewRGB8Bit(r, g, b)) +} + +func init() { + EmptyBuilder = &Builder{empty} +} diff --git a/vendor/github.com/morikuni/aec/sgr.go b/vendor/github.com/morikuni/aec/sgr.go new file mode 100644 index 000000000..0ba3464e6 --- /dev/null +++ b/vendor/github.com/morikuni/aec/sgr.go @@ -0,0 +1,202 @@ +package aec + +import ( + "fmt" +) + +// RGB3Bit is a 3bit RGB color. +type RGB3Bit uint8 + +// RGB8Bit is a 8bit RGB color. +type RGB8Bit uint8 + +func newSGR(n uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", n)) +} + +// NewRGB3Bit create a RGB3Bit from given RGB. +func NewRGB3Bit(r, g, b uint8) RGB3Bit { + return RGB3Bit((r >> 7) | ((g >> 6) & 0x2) | ((b >> 5) & 0x4)) +} + +// NewRGB8Bit create a RGB8Bit from given RGB. +func NewRGB8Bit(r, g, b uint8) RGB8Bit { + return RGB8Bit(16 + 36*(r/43) + 6*(g/43) + b/43) +} + +// Color3BitF set the foreground color of text. +func Color3BitF(c RGB3Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", c+30)) +} + +// Color3BitB set the background color of text. +func Color3BitB(c RGB3Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", c+40)) +} + +// Color8BitF set the foreground color of text. +func Color8BitF(c RGB8Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"38;5;%dm", c)) +} + +// Color8BitB set the background color of text. +func Color8BitB(c RGB8Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"48;5;%dm", c)) +} + +// FullColorF set the foreground color of text. +func FullColorF(r, g, b uint8) ANSI { + return newAnsi(fmt.Sprintf(esc+"38;2;%d;%d;%dm", r, g, b)) +} + +// FullColorB set the foreground color of text. +func FullColorB(r, g, b uint8) ANSI { + return newAnsi(fmt.Sprintf(esc+"48;2;%d;%d;%dm", r, g, b)) +} + +// Style +var ( + // Bold set the text style to bold or increased intensity. + Bold ANSI + + // Faint set the text style to faint. + Faint ANSI + + // Italic set the text style to italic. + Italic ANSI + + // Underline set the text style to underline. + Underline ANSI + + // BlinkSlow set the text style to slow blink. + BlinkSlow ANSI + + // BlinkRapid set the text style to rapid blink. + BlinkRapid ANSI + + // Inverse swap the foreground color and background color. + Inverse ANSI + + // Conceal set the text style to conceal. + Conceal ANSI + + // CrossOut set the text style to crossed out. + CrossOut ANSI + + // Frame set the text style to framed. + Frame ANSI + + // Encircle set the text style to encircled. + Encircle ANSI + + // Overline set the text style to overlined. + Overline ANSI +) + +// Foreground color of text. +var ( + // DefaultF is the default color of foreground. + DefaultF ANSI + + // Normal color + BlackF ANSI + RedF ANSI + GreenF ANSI + YellowF ANSI + BlueF ANSI + MagentaF ANSI + CyanF ANSI + WhiteF ANSI + + // Light color + LightBlackF ANSI + LightRedF ANSI + LightGreenF ANSI + LightYellowF ANSI + LightBlueF ANSI + LightMagentaF ANSI + LightCyanF ANSI + LightWhiteF ANSI +) + +// Background color of text. +var ( + // DefaultB is the default color of background. 
+ DefaultB ANSI + + // Normal color + BlackB ANSI + RedB ANSI + GreenB ANSI + YellowB ANSI + BlueB ANSI + MagentaB ANSI + CyanB ANSI + WhiteB ANSI + + // Light color + LightBlackB ANSI + LightRedB ANSI + LightGreenB ANSI + LightYellowB ANSI + LightBlueB ANSI + LightMagentaB ANSI + LightCyanB ANSI + LightWhiteB ANSI +) + +func init() { + Bold = newSGR(1) + Faint = newSGR(2) + Italic = newSGR(3) + Underline = newSGR(4) + BlinkSlow = newSGR(5) + BlinkRapid = newSGR(6) + Inverse = newSGR(7) + Conceal = newSGR(8) + CrossOut = newSGR(9) + + BlackF = newSGR(30) + RedF = newSGR(31) + GreenF = newSGR(32) + YellowF = newSGR(33) + BlueF = newSGR(34) + MagentaF = newSGR(35) + CyanF = newSGR(36) + WhiteF = newSGR(37) + + DefaultF = newSGR(39) + + BlackB = newSGR(40) + RedB = newSGR(41) + GreenB = newSGR(42) + YellowB = newSGR(43) + BlueB = newSGR(44) + MagentaB = newSGR(45) + CyanB = newSGR(46) + WhiteB = newSGR(47) + + DefaultB = newSGR(49) + + Frame = newSGR(51) + Encircle = newSGR(52) + Overline = newSGR(53) + + LightBlackF = newSGR(90) + LightRedF = newSGR(91) + LightGreenF = newSGR(92) + LightYellowF = newSGR(93) + LightBlueF = newSGR(94) + LightMagentaF = newSGR(95) + LightCyanF = newSGR(96) + LightWhiteF = newSGR(97) + + LightBlackB = newSGR(100) + LightRedB = newSGR(101) + LightGreenB = newSGR(102) + LightYellowB = newSGR(103) + LightBlueB = newSGR(104) + LightMagentaB = newSGR(105) + LightCyanB = newSGR(106) + LightWhiteB = newSGR(107) +} diff --git a/vendor/github.com/prometheus/client_golang/AUTHORS.md b/vendor/github.com/prometheus/client_golang/AUTHORS.md new file mode 100644 index 000000000..c5275d5ab --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/AUTHORS.md @@ -0,0 +1,18 @@ +The Prometheus project was started by Matt T. Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Björn Rabenstein + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Bernerd Schaefer +* Björn Rabenstein +* Daniel Bornkessel +* Jeff Younker +* Julius Volz +* Matt T. Proud +* Tobias Schmidt + diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 000000000..dd878a30e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). 
+https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 000000000..623d3d83f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. However, + // two different Collectors must not send duplicate descriptors.) This + // method idempotently sends the same descriptors throughout the + // lifetime of the Collector. If a Collector encounters an error while + // executing this method, it must send an invalid descriptor (created + // with NewInvalidDesc) to signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by + // Describe. Returned metrics that share the same descriptor must differ + // in their variable label values. This method may be called + // concurrently and must therefore be implemented in a concurrency safe + // way. Blocking occurs at the expense of total performance of rendering + // all registered metrics. Ideally, Collector implementations support + // concurrent readers. + Collect(chan<- Metric) +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. 
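A minimal custom Collector, sketched here for illustration (not part of the vendored file; the type, metric name, and version value are hypothetical), only needs the Describe and Collect methods documented above plus the constant-metric helpers from this package:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    // buildInfoCollector exports a single constant gauge carrying a version label.
    type buildInfoCollector struct {
        desc *prometheus.Desc
    }

    func newBuildInfoCollector() *buildInfoCollector {
        return &buildInfoCollector{
            desc: prometheus.NewDesc(
                "myapp_build_info",
                "Build information, labeled by version.",
                []string{"version"}, nil,
            ),
        }
    }

    // Describe sends the one descriptor this collector will ever use.
    func (c *buildInfoCollector) Describe(ch chan<- *prometheus.Desc) {
        ch <- c.desc
    }

    // Collect emits a throw-away constant metric on every scrape.
    func (c *buildInfoCollector) Collect(ch chan<- prometheus.Metric) {
        ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 1, "1.2.3")
    }

    func main() {
        prometheus.MustRegister(newBuildInfoCollector())
    }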
+type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 000000000..ee37949ad --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,172 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Set is used to set the Counter to an arbitrary value. It is only used + // if you have to transfer a value from an external counter into this + // Prometheus metric. Do not use it for regular handling of a + // Prometheus counter (as it can be used to break the contract of + // monotonically increasing values). + // + // Deprecated: Use NewConstMetric to create a counter for an external + // value. A Counter should never be set. + Set(float64) + // Inc increments the counter by 1. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + value +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + c.value.Add(v) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). 
Create instances with NewCounterVec. +// +// CounterVec embeds MetricVec. See there for a full list of methods with +// detailed documentation. +type CounterVec struct { + *MetricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + result := &counter{value: value{ + desc: desc, + valType: CounterValue, + labelPairs: makeLabelPairs(desc, lvs), + }} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Counter and not a +// Metric so that no type conversion is required. +func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Counter and not a Metric so that no +// type conversion is required. +func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *CounterVec) WithLabelValues(lvs ...string) Counter { + return m.MetricVec.WithLabelValues(lvs...).(Counter) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *CounterVec) With(labels Labels) Counter { + return m.MetricVec.With(labels).(Counter) +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. 
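As a usage sketch for the Counter and CounterVec types documented above (not part of the vendored file; the metric and label names are made up):

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        // A plain Counter: only Inc and Add (with non-negative values) are allowed.
        jobsDone := prometheus.NewCounter(prometheus.CounterOpts{
            Name: "worker_jobs_completed_total",
            Help: "Total number of jobs completed.",
        })

        // A CounterVec partitions the same counter by label values.
        requests := prometheus.NewCounterVec(prometheus.CounterOpts{
            Name: "http_requests_total",
            Help: "HTTP requests, partitioned by status code and method.",
        }, []string{"code", "method"})

        prometheus.MustRegister(jobsDone, requests)

        jobsDone.Inc()
        requests.WithLabelValues("200", "GET").Inc()
        requests.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(3)
    }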
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 000000000..77f4b30e8 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,205 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "regexp" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +var ( + metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) + labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") +) + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. + constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. 
+ id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occured during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName and help must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Opts documentation for the implications of +// constant labels. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if help == "" { + d.err = errors.New("empty help string") + return d + } + if !metricNameRE.MatchString(fqName) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... + for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. + for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + vh := hashNew() + for _, val := range labelValues { + vh = hashAdd(vh, val) + vh = hashAddByte(vh, separatorByte) + } + d.id = vh + // Sort labelNames so that order doesn't matter for the hash. + sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. + lh := hashNew() + lh = hashAdd(lh, help) + lh = hashAddByte(lh, separatorByte) + for _, labelName := range labelNames { + lh = hashAdd(lh, labelName) + lh = hashAddByte(lh, separatorByte) + } + d.dimHash = lh + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(LabelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. 
a descriptor with the +// provided error set. If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. +func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} + +func checkLabelName(l string) bool { + return labelNameRE.MatchString(l) && + !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 000000000..b15a2d3b9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,181 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus provides metrics primitives to instrument code for +// monitoring. It also offers a registry for metrics. Sub-packages allow to +// expose the registered metrics via HTTP (package promhttp) or push them to a +// Pushgateway (package push). +// +// All exported functions and methods are safe to be used concurrently unless +//specified otherwise. +// +// A Basic Example +// +// As a starting point, a very basic usage example: +// +// package main +// +// import ( +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ) +// ) +// +// func init() { +// // Metrics have to be registered to be exposed: +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // The Handler function provides a default handler to expose metrics +// // via an HTTP server. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.Handler()) +// http.ListenAndServe(":8080", nil) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter, +// the latter with a label attached to turn it into a (one-dimensional) vector. +// +// Metrics +// +// The number of exported identifiers in this package might appear a bit +// overwhelming. 
Hovever, in addition to the basic plumbing shown in the example +// above, you only need to understand the different metric types and their +// vector versions for basic usage. +// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// HistogramVec, and UntypedVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, +// HistogramOpts, or UntypedOpts. +// +// Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). +// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created +// later. NewDesc comes in handy to create those Desc instances. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. +// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might +// cause. As suggested by the name, MustRegister panics if an error occurs. 
With +// the Register function, the error is returned and can be handled. +// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data +// model. Inconsistencies are ideally detected at registration time, not at +// collect time. The former will usually be detected at start-up time of a +// program, while the latter will only happen at scrape time, possibly not even +// on the first scrape if the inconsistency only becomes relevant later. That is +// the main reason why a Collector and a Metric have to describe themselves to +// the registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegistry variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. The methods Register and Unregister work in +// the same way on a custom registry as the global functions Register and +// Unregister on the default registry. +// +// There are a number of uses for custom registries: You can use registries +// with special properties, see NewPedanticRegistry. You can avoid global state, +// as it is imposed by the DefaultRegistry. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegistry comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. +// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp +// sub-package. (The top-level functions in the prometheus package are +// deprecated.) +// +// Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// Other Means of Exposition +// +// More ways of exposing metrics can easily be added. Sending metrics to +// Graphite would be an example that will soon be implemented. +package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go new file mode 100644 index 000000000..18a99d5fa --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
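The custom-registry and HTTP-exposition flow described in the doc.go comments above can be sketched as follows (not part of the vendored file; promhttp.HandlerFor and HandlerOpts come from the promhttp sub-package the comments refer to, and the metric name is hypothetical):

    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // A custom registry starts empty: no default Go or process collectors.
        reg := prometheus.NewRegistry()

        builds := prometheus.NewCounter(prometheus.CounterOpts{
            Name: "myapp_builds_total",
            Help: "Builds triggered since start.",
        })
        reg.MustRegister(builds)

        // Expose only what this registry gathers.
        http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
        log.Fatal(http.ListenAndServe(":8080", nil))
    }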
+ +package prometheus + +import ( + "encoding/json" + "expvar" +) + +type expvarCollector struct { + exports map[string]*Desc +} + +// NewExpvarCollector returns a newly allocated expvar Collector that still has +// to be registered with a Prometheus registry. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototying, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*Desc) Collector { + return &expvarCollector{ + exports: exports, + } +} + +// Describe implements Collector. +func (e *expvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. +func (e *expvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) + case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) 
+ } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go new file mode 100644 index 000000000..e3b67df8a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -0,0 +1,29 @@ +package prometheus + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go new file mode 100644 index 000000000..8b70e5141 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -0,0 +1,140 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. + Inc() + // Dec decrements the Gauge by 1. + Dec() + // Add adds the given value to the Gauge. (The value can be + // negative, resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +func NewGauge(opts GaugeOpts) Gauge { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, 0) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. 
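As a usage sketch for Gauge and GaugeVec as documented above (not part of the vendored file; the metric name, queue names, and values are made up):

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        // A GaugeVec partitions a gauge by label values; Set, Inc, Dec, Add, and Sub all apply.
        queued := prometheus.NewGaugeVec(prometheus.GaugeOpts{
            Name: "myapp_jobs_queued",
            Help: "Jobs currently queued, partitioned by queue name.",
        }, []string{"queue"})
        prometheus.MustRegister(queued)

        queued.WithLabelValues("email").Set(12)
        queued.WithLabelValues("email").Dec()
        queued.With(prometheus.Labels{"queue": "sms"}).Add(3)
    }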
+type GaugeVec struct { + *MetricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newValue(desc, GaugeValue, 0, lvs...) + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Gauge and not a +// Metric so that no type conversion is required. +func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Gauge and not a Metric so that no +// type conversion is required. +func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { + return m.MetricVec.WithLabelValues(lvs...).(Gauge) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *GaugeVec) With(labels Labels) Gauge { + return m.MetricVec.With(labels).(Gauge) +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 000000000..abc9d4ec4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,263 @@ +package prometheus + +import ( + "fmt" + "runtime" + "runtime/debug" + "time" +) + +type goCollector struct { + goroutines Gauge + gcDesc *Desc + + // metrics to describe and collect + metrics memStatsMetrics +} + +// NewGoCollector returns a collector which exports metrics about the current +// go process. 
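Wiring the Go runtime collector into a registry can be sketched as follows (not part of the vendored file). The doc.go comments earlier in this patch note that the default registry already carries this collector, so the sketch uses a fresh registry to avoid a duplicate registration:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        reg := prometheus.NewRegistry()
        reg.MustRegister(prometheus.NewGoCollector())

        // Gather once and print the metric family names, e.g. go_goroutines,
        // go_memstats_alloc_bytes, go_gc_duration_seconds.
        families, err := reg.Gather()
        if err != nil {
            panic(err)
        }
        for _, mf := range families {
            fmt.Println(mf.GetName())
        }
    }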
+func NewGoCollector() Collector { + return &goCollector{ + goroutines: NewGauge(GaugeOpts{ + Namespace: "go", + Name: "goroutines", + Help: "Number of goroutines that currently exist.", + }), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + metrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained by system. Sum of all system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes_total"), + "Total number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + 
memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return fmt.Sprintf("go_memstats_%s", s) +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutines.Desc() + ch <- c.gcDesc + + for _, i := range c.metrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + c.goroutines.Set(float64(runtime.NumGoroutine())) + ch <- c.goroutines + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + + ms := &runtime.MemStats{} + runtime.ReadMemStats(ms) + for _, i := range c.metrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. 
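The memStatsMetrics slice above is a table-driven form of the constant-metric pattern; the same idea for an application-defined stats struct might look like the following sketch (not part of the vendored file; appStats, the package name, and the metric names are hypothetical):

    package appmetrics

    import "github.com/prometheus/client_golang/prometheus"

    type appStats struct {
        Queued    int
        Processed int
    }

    // One Desc/eval pair per exported reading, mirroring memStatsMetrics.
    var appMetrics = []struct {
        desc    *prometheus.Desc
        eval    func(*appStats) float64
        valType prometheus.ValueType
    }{
        {
            desc:    prometheus.NewDesc("myapp_jobs_queued", "Jobs currently queued.", nil, nil),
            eval:    func(s *appStats) float64 { return float64(s.Queued) },
            valType: prometheus.GaugeValue,
        },
        {
            desc:    prometheus.NewDesc("myapp_jobs_processed_total", "Jobs processed since start.", nil, nil),
            eval:    func(s *appStats) float64 { return float64(s.Processed) },
            valType: prometheus.CounterValue,
        },
    }

    // collectInto would be called from a Collector's Collect method.
    func collectInto(ch chan<- prometheus.Metric, s *appStats) {
        for _, m := range appMetrics {
            ch <- prometheus.MustNewConstMetric(m.desc, m.valType, m.eval(s))
        }
    }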
+type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 000000000..9719e8fac --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,444 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. 
The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this + // Histogram. Histograms with the same fully-qualified name must have the + // same label names in their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // HistogramVec. ConstLabels serve only special purposes. One is for the + // special case where the value of a label does not change during the + // lifetime of a process, e.g. if the revision of the running binary is + // put into a label. Another, more advanced purpose is if more than one + // Collector needs to collect Histograms with the same fully-qualified + // name. In that case, those Summaries must differ in the values of + // their ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. 
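As a usage sketch for HistogramOpts and the bucket helpers documented above (not part of the vendored file; the metric name, bucket layout, and observed value are made up):

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        // Eight buckets: 5ms, 10ms, 20ms, ... doubling up to 640ms; +Inf is added implicitly.
        latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
            Name:    "myapp_request_duration_seconds",
            Help:    "Request latency in seconds.",
            Buckets: prometheus.ExponentialBuckets(0.005, 2, 8),
        }, []string{"method"})
        prometheus.MustRegister(latency)

        latency.WithLabelValues("GET").Observe(0.042)

        // Omitting Buckets falls back to DefBuckets; LinearBuckets(0.1, 0.1, 10)
        // would give 0.1s through 1.0s in 0.1s steps instead.
        _ = prometheus.LinearBuckets(0.1, 0.1, 10)
    }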
+func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make counts. + h.counts = make([]uint64, len(h.upperBounds)) + + h.init(h) // Init self-collection. + return h +} + +type histogram struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + + selfCollector + // Note that there is no mutex required. + + desc *Desc + + upperBounds []float64 + counts []uint64 + + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. + // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + if i < len(h.counts) { + atomic.AddUint64(&h.counts[i], 1) + } + atomic.AddUint64(&h.count, 1) + for { + oldBits := atomic.LoadUint64(&h.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + break + } + } +} + +func (h *histogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, len(h.upperBounds)) + + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) + his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) + var count uint64 + for i, upperBound := range h.upperBounds { + count += atomic.LoadUint64(&h.counts[i]) + buckets[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + } + } + his.Bucket = buckets + out.Histogram = his + out.Label = h.labelPairs + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. 
This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. +type HistogramVec struct { + *MetricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Histogram and not a +// Metric so that no type conversion is required. +func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Histogram), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Histogram and not a Metric so that no +// type conversion is required. +func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Histogram), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { + return m.MetricVec.WithLabelValues(lvs...).(Histogram) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *HistogramVec) With(labels Labels) Histogram { + return m.MetricVec.With(labels).(Histogram) +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. 
However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. +func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstMetric would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go new file mode 100644 index 000000000..67ee5ac79 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -0,0 +1,490 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" +) + +// TODO(beorn7): Remove this whole file. It is a partial mirror of +// promhttp/http.go (to avoid circular import chains) where everything HTTP +// related should live. The functions here are just for avoiding +// breakage. Everything is deprecated. + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. You might want to consider using promhttp.Handler instead +// (which is non instrumented). 
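A minimal sketch of how this deprecated handler has typically been wired up (the route and listen address are assumptions); as the comment above suggests, new code would reach for promhttp.Handler instead.

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // Serve the DefaultGatherer; Handler() also instruments itself via the
    // deprecated InstrumentHandler machinery described further below.
    http.Handle("/metrics", prometheus.Handler())
    log.Fatal(http.ListenAndServe(":9090", nil))
}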
+func Handler() http.Handler { + return InstrumentHandler("prometheus", UninstrumentedHandler()) +} + +// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. +// +// Deprecated: Use promhttp.Handler instead. See there for further documentation. +func UninstrumentedHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + mfs, err := DefaultGatherer.Gather() + if err != nil { + http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) + }) +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +var instLabels = []string{"method", "code"} + +type nower interface { + Now() time.Time +} + +type nowFunc func() time.Time + +func (n nowFunc) Now() time.Time { + return n() +} + +var now nower = nowFunc(func() time.Time { + return time.Now() +}) + +func nowSeries(t ...time.Time) nower { + return nowFunc(func() time.Time { + defer func() { + t = t[1:] + }() + + return t[0] + }) +} + +// InstrumentHandler wraps the given HTTP handler for instrumentation. It +// registers four metric collectors (if not already done) and reports HTTP +// metrics to the (newly or already) registered collectors: http_requests_total +// (CounterVec), http_request_duration_microseconds (Summary), +// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each +// has a constant label named "handler" with the provided handlerName as +// value. http_requests_total is a metric vector partitioned by HTTP method +// (label name "method") and HTTP status code (label name "code"). +// +// Deprecated: InstrumentHandler has several issues: +// +// - It uses Summaries rather than Histograms. Summaries are not useful if +// aggregation across multiple instances is required. +// +// - It uses microseconds as unit, which is deprecated and should be replaced by +// seconds. +// +// - The size of the request is calculated in a separate goroutine. Since this +// calculator requires access to the request header, it creates a race with +// any writes to the header performed during request handling. 
+// httputil.ReverseProxy is a prominent example for a handler +// performing such writes. +// +// Upcoming versions of this package will provide ways of instrumenting HTTP +// handlers that are more flexible and have fewer issues. Please prefer direct +// instrumentation in the meantime. +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler (and shares the same +// issues). +// +// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as +// InstrumentHandler is. +func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts( + SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": handlerName}, + }, + handlerFunc, + ) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same +// issues) but provides more flexibility (at the cost of a more complex call +// syntax). As InstrumentHandler, this function registers four metric +// collectors, but it uses the provided SummaryOpts to create them. However, the +// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced +// by "requests_total", "request_duration_microseconds", "request_size_bytes", +// and "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). +// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +// prometheus.InstrumentHandlerWithOpts( +// prometheus.SummaryOpts{ +// Subsystem: "http", +// ConstLabels: prometheus.Labels{"handler": handlerName}, +// }, +// handler, +// ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +// +// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as +// InstrumentHandler is. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares +// the same issues) but provides more flexibility (at the cost of a more complex +// call syntax). See InstrumentHandlerWithOpts for details how the provided +// SummaryOpts are used. +// +// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons +// as InstrumentHandler is. +func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." 
+ reqSz := NewSummary(opts) + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." + resSz := NewSummary(opts) + + regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) + regReqDur := MustRegisterOrGet(reqDur).(Summary) + regReqSz := MustRegisterOrGet(reqSz).(Summary) + regResSz := MustRegisterOrGet(resSz).(Summary) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := make(chan int) + urlLen := 0 + if r.URL != nil { + urlLen = len(r.URL.String()) + } + go computeApproximateRequestSize(r, out, urlLen) + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + var rw http.ResponseWriter + if cn && fl && hj && rf { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + handlerFunc(rw, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + regReqCnt.WithLabelValues(method, code).Inc() + regReqDur.Observe(elapsed) + regResSz.Observe(float64(delegate.written)) + regReqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request, out chan int, s int) { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type fancyResponseWriterDelegator struct { + *responseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { + if !f.wroteHeader { + f.WriteHeader(http.StatusOK) + } + n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) + f.written += n + return n, err +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + 
case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 000000000..d4063d98f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,166 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. + // + // While populating dto.Metric, it is the responsibility of the + // implementation to ensure validity of the Metric protobuf (like valid + // UTF-8 strings or syntactically valid metric and label names). It is + // recommended to sort labels lexicographically. (Implementers may find + // LabelPairSorter useful for that.) Callers of Write should still make + // sure of sorting if they depend on it. + Write(*dto.Metric) error + // TODO(beorn7): The original rationale of passing in a pre-allocated + // dto.Metric protobuf to save allocations has disappeared. 
The + // signature of this method should be changed to "Write() (*dto.Metric, + // error)". +} + +// Opts bundles the options for creating most Metric types. Each metric +// implementation XXX has its own XXXOpts type, but in most cases, it is just be +// an alias of this type (which might change when the requirement arises.) +// +// It is mandatory to set Name and Help to a non-empty string. All other fields +// are optional and can safely be left at their zero value. +type Opts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Metric (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the metric must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this metric. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a metric + // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels + // serve only special purposes. One is for the special case where the + // value of a label does not change during the lifetime of a process, + // e.g. if the revision of the running binary is put into a + // label. Another, more advanced purpose is if more than one Collector + // needs to collect Metrics with the same fully-qualified name. In that + // case, those Metrics must differ in the values of their + // ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + if name == "" { + return "" + } + switch { + case namespace != "" && subsystem != "": + return strings.Join([]string{namespace, subsystem, name}, "_") + case namespace != "": + return strings.Join([]string{namespace, name}, "_") + case subsystem != "": + return strings.Join([]string{subsystem, name}, "_") + } + return name +} + +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. This is useful for implementing the Write method of +// custom metrics. 
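As a quick, hedged illustration of BuildFQName (the component values below are arbitrary examples):

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // Components are joined with "_"; empty ones are skipped.
    fmt.Println(prometheus.BuildFQName("myapp", "http", "requests_total")) // myapp_http_requests_total
    fmt.Println(prometheus.BuildFQName("", "", "requests_total"))          // requests_total
}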
+type LabelPairSorter []*dto.LabelPair + +func (s LabelPairSorter) Len() int { + return len(s) +} + +func (s LabelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type hashSorter []uint64 + +func (s hashSorter) Len() int { + return len(s) +} + +func (s hashSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s hashSorter) Less(i, j int) bool { + return s[i] < s[j] +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 000000000..e31e62e78 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,142 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "github.com/prometheus/procfs" + +type processCollector struct { + pid int + collectFn func(chan<- Metric) + pidFn func() (int, error) + cpuTotal Counter + openFDs, maxFDs Gauge + vsize, rss Gauge + startTime Gauge +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including cpu, memory and file descriptor usage as well as +// the process start time for the given process id under the given namespace. +func NewProcessCollector(pid int, namespace string) Collector { + return NewProcessCollectorPIDFn( + func() (int, error) { return pid, nil }, + namespace, + ) +} + +// NewProcessCollectorPIDFn returns a collector which exports the current state +// of process metrics including cpu, memory and file descriptor usage as well +// as the process start time under the given namespace. The given pidFn is +// called on each collect and is used to determine the process to export +// metrics for. 
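A short sketch of registering this collector on a dedicated registry (the namespace and registry are assumptions; the package's default registry already installs one such collector for the current process):

package main

import (
    "os"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    reg := prometheus.NewRegistry()
    // Export CPU, memory, fd, and start-time metrics for this process
    // under the hypothetical "myapp" namespace.
    reg.MustRegister(prometheus.NewProcessCollector(os.Getpid(), "myapp"))
    // NewProcessCollectorPIDFn could be used instead to follow a child or
    // supervised process whose PID may change over time.
}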
+func NewProcessCollectorPIDFn( + pidFn func() (int, error), + namespace string, +) Collector { + c := processCollector{ + pidFn: pidFn, + collectFn: func(chan<- Metric) {}, + + cpuTotal: NewCounter(CounterOpts{ + Namespace: namespace, + Name: "process_cpu_seconds_total", + Help: "Total user and system CPU time spent in seconds.", + }), + openFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_open_fds", + Help: "Number of open file descriptors.", + }), + maxFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_max_fds", + Help: "Maximum number of open file descriptors.", + }), + vsize: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_virtual_memory_bytes", + Help: "Virtual memory size in bytes.", + }), + rss: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_resident_memory_bytes", + Help: "Resident memory size in bytes.", + }), + startTime: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_start_time_seconds", + Help: "Start time of the process since unix epoch in seconds.", + }), + } + + // Set up process metric collection if supported by the runtime. + if _, err := procfs.NewStat(); err == nil { + c.collectFn = c.processCollect + } + + return &c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal.Desc() + ch <- c.openFDs.Desc() + ch <- c.maxFDs.Desc() + ch <- c.vsize.Desc() + ch <- c.rss.Desc() + ch <- c.startTime.Desc() +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the +// client allows users to configure the error behavior. +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + return + } + + if stat, err := p.NewStat(); err == nil { + c.cpuTotal.Set(stat.CPUTime()) + ch <- c.cpuTotal + c.vsize.Set(float64(stat.VirtualMemory())) + ch <- c.vsize + c.rss.Set(float64(stat.ResidentMemory())) + ch <- c.rss + + if startTime, err := stat.StartTime(); err == nil { + c.startTime.Set(startTime) + ch <- c.startTime + } + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + c.openFDs.Set(float64(fds)) + ch <- c.openFDs + } + + if limits, err := p.NewLimits(); err == nil { + c.maxFDs.Set(float64(limits.OpenFiles)) + ch <- c.maxFDs + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 000000000..32a3986b0 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,806 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "bytes" + "errors" + "fmt" + "os" + "sort" + "sync" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +const ( + // Capacity for the channel to collect metrics and descriptors. + capMetricChan = 1000 + capDescChan = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. Initially, both variables point to the same Registry, which +// has a process collector (see NewProcessCollector) and a Go collector (see +// NewGoCollector) already registered. This approach to keep default instances +// as global state mirrors the approach of other packages in the Go standard +// library. Note that there are caveats. Change the variables with caution and +// only if you understand the consequences. Users who want to avoid global state +// altogether should not use the convenience function and act on custom +// instances instead. +var ( + defaultRegistry = NewRegistry() + DefaultRegisterer Registerer = defaultRegistry + DefaultGatherer Gatherer = defaultRegistry +) + +func init() { + MustRegister(NewProcessCollector(os.Getpid(), "")) + MustRegister(NewGoCollector()) +} + +// NewRegistry creates a new vanilla Registry without any Collectors +// pre-registered. +func NewRegistry() *Registry { + return &Registry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + } +} + +// NewPedanticRegistry returns a registry that checks during collection if each +// collected Metric is consistent with its reported Desc, and if the Desc has +// actually been registered with the registry. +// +// Usually, a Registry will be happy as long as the union of all collected +// Metrics is consistent and valid even if some metrics are not consistent with +// their own Desc or a Desc provided by their registered Collector. Well-behaved +// Collectors and Metrics will only provide consistent Descs. This Registry is +// useful to test the implementation of Collectors and Metrics. +func NewPedanticRegistry() *Registry { + r := NewRegistry() + r.pedanticChecksEnabled = true + return r +} + +// Registerer is the interface for the part of a registry in charge of +// registering and unregistering. Users of custom registries should use +// Registerer as type for registration purposes (rather then the Registry type +// directly). In that way, they are free to use custom Registerer implementation +// (e.g. for testing purposes). +type Registerer interface { + // Register registers a new Collector to be included in metrics + // collection. It returns an error if the descriptors provided by the + // Collector are invalid or if they — in combination with descriptors of + // already registered Collectors — do not fulfill the consistency and + // uniqueness criteria described in the documentation of metric.Desc. + // + // If the provided Collector is equal to a Collector already registered + // (which includes the case of re-registering the same Collector), the + // returned error is an instance of AlreadyRegisteredError, which + // contains the previously registered Collector. + // + // It is in general not safe to register the same Collector multiple + // times concurrently. + Register(Collector) error + // MustRegister works like Register but registers any number of + // Collectors and panics upon the first registration that causes an + // error. 
+ MustRegister(...Collector) + // Unregister unregisters the Collector that equals the Collector passed + // in as an argument. (Two Collectors are considered equal if their + // Describe method yields the same set of descriptors.) The function + // returns whether a Collector was unregistered. + // + // Note that even after unregistering, it will not be possible to + // register a new Collector that is inconsistent with the unregistered + // Collector, e.g. a Collector collecting metrics with the same name but + // a different help string. The rationale here is that the same registry + // instance must only collect consistent metrics throughout its + // lifetime. + Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { + // Gather calls the Collect method of the registered Collectors and then + // gathers the collected metrics into a lexicographically sorted slice + // of MetricFamily protobufs. Even if an error occurs, Gather attempts + // to gather as many metrics as possible. Hence, if a non-nil error is + // returned, the returned MetricFamily slice could be nil (in case of a + // fatal error that prevented any meaningful metric collection) or + // contain a number of MetricFamily protobufs, some of which might be + // incomplete, and some might be missing altogether. The returned error + // (which might be a MultiError) explains the details. In scenarios + // where complete collection is critical, the returned MetricFamily + // protobufs should be disregarded if the returned error is non-nil. + Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. +func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// RegisterOrGet registers the provided Collector with the DefaultRegisterer and +// returns the Collector, unless an equal Collector was registered before, in +// which case that Collector is returned. +// +// Deprecated: RegisterOrGet is merely a convenience function for the +// implementation as described in the documentation for +// AlreadyRegisteredError. As the use case is relatively rare, this function +// will be removed in a future version of this package to clean up the +// namespace. +func RegisterOrGet(c Collector) (Collector, error) { + if err := Register(c); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + return are.ExistingCollector, nil + } + return nil, err + } + return c, nil +} + +// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning +// an error. +// +// Deprecated: This is deprecated for the same reason RegisterOrGet is. See +// there for details. +func MustRegisterOrGet(c Collector) Collector { + c, err := RegisterOrGet(c) + if err != nil { + panic(err) + } + return c +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. 
+// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. +func Unregister(c Collector) bool { + return DefaultRegisterer.Unregister(c) +} + +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. +func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { + return gf() +} + +// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that +// gathers from the previous DefaultGatherers but then merges the MetricFamily +// protobufs returned from the provided hook function with the MetricFamily +// protobufs returned from the original DefaultGatherer. +// +// Deprecated: This function manipulates the DefaultGatherer variable. Consider +// the implications, i.e. don't do this concurrently with any uses of the +// DefaultGatherer. In the rare cases where you need to inject MetricFamily +// protobufs directly, it is recommended to use a custom Registry and combine it +// with a custom Gatherer using the Gatherers type (see +// there). SetMetricFamilyInjectionHook only exists for compatibility reasons +// with previous versions of this package. +func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { + DefaultGatherer = Gatherers{ + DefaultGatherer, + GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }), + } +} + +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. This can be used to +// find out if an equal Collector has been registered before and switch over to +// using the old one, as demonstrated in the example. +type AlreadyRegisteredError struct { + ExistingCollector, NewCollector Collector +} + +func (err AlreadyRegisteredError) Error() string { + return "duplicate metrics collector registration attempted" +} + +// MultiError is a slice of errors implementing the error interface. It is used +// by a Gatherer to report multiple errors during MetricFamily gathering. +type MultiError []error + +func (errs MultiError) Error() string { + if len(errs) == 0 { + return "" + } + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) + for _, err := range errs { + fmt.Fprintf(buf, "\n* %s", err) + } + return buf.String() +} + +// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only +// contained error as error if len(errs is 1). In all other cases, it returns +// the MultiError directly. This is helpful for returning a MultiError in a way +// that only uses the MultiError if needed. +func (errs MultiError) MaybeUnwrap() error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + return errs + } +} + +// Registry registers Prometheus collectors, collects their metrics, and gathers +// them into MetricFamilies for exposition. It implements both Registerer and +// Gatherer. The zero value is not usable. Create instances with NewRegistry or +// NewPedanticRegistry. +type Registry struct { + mtx sync.RWMutex + collectorsByID map[uint64]Collector // ID is a hash of the descIDs. 
+ descIDs map[uint64]struct{} + dimHashesByName map[string]uint64 + pedanticChecksEnabled bool +} + +// Register implements Registerer. +func (r *Registry) Register(c Collector) error { + var ( + descChan = make(chan *Desc, capDescChan) + newDescIDs = map[uint64]struct{}{} + newDimHashesByName = map[string]uint64{} + collectorID uint64 // Just a sum of all desc IDs. + duplicateDescErr error + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + r.mtx.Lock() + defer r.mtx.Unlock() + // Conduct various tests... + for desc := range descChan { + + // Is the descriptor valid at all? + if desc.err != nil { + return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + } + + // Is the descID unique? + // (In other words: Is the fqName + constLabel combination unique?) + if _, exists := r.descIDs[desc.id]; exists { + duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) + } + // If it is not a duplicate desc in this collector, add it to + // the collectorID. (We allow duplicate descs within the same + // collector, but their existence must be a no-op.) + if _, exists := newDescIDs[desc.id]; !exists { + newDescIDs[desc.id] = struct{}{} + collectorID += desc.id + } + + // Are all the label names and the help string consistent with + // previous descriptors of the same name? + // First check existing descriptors... + if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // Did anything happen at all? + if len(newDescIDs) == 0 { + return errors.New("collector has no descriptors") + } + if existing, exists := r.collectorsByID[collectorID]; exists { + return AlreadyRegisteredError{ + ExistingCollector: existing, + NewCollector: c, + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // Just a sum of the desc IDs.
+ ) + go func() { + c.Describe(descChan) + close(descChan) + }() + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + var ( + metricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + ) + + r.mtx.RLock() + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + + // Scatter. + // (Collectors could be complex and slow, so we call them all at once.) + wg.Add(len(r.collectorsByID)) + go func() { + wg.Wait() + close(metricChan) + }() + for _, collector := range r.collectorsByID { + go func(collector Collector) { + defer wg.Done() + collector.Collect(metricChan) + }(collector) + } + + // In case pedantic checks are enabled, we have to copy the map before + // giving up the RLock. + if r.pedanticChecksEnabled { + registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) + for id := range r.descIDs { + registeredDescIDs[id] = struct{}{} + } + } + + r.mtx.RUnlock() + + // Drain metricChan in case of premature return. + defer func() { + for _ = range metricChan { + } + }() + + // Gather. + for metric := range metricChan { + // This could be done concurrently, too, but it required locking + // of metricFamiliesByName (and of metricHashes if checks are + // enabled). Most likely not worth it. + desc := metric.Desc() + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + errs = append(errs, fmt.Errorf( + "error collecting metric %v: %s", desc, err, + )) + continue + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { + if metricFamily.GetHelp() != desc.help { + errs = append(errs, fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + )) + continue + } + // TODO(beorn7): Simplify switch once Desc has type. 
+ switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + )) + continue + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + errs = append(errs, fmt.Errorf( + "empty metric collected: %s", dtoMetric, + )) + continue + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { + errs = append(errs, err) + continue + } + if r.pedanticChecksEnabled { + // Is the desc registered at all? + if _, exist := registeredDescIDs[desc.id]; !exist { + errs = append(errs, fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + )) + continue + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + errs = append(errs, err) + continue + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + } + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// Gatherers is a slice of Gatherer instances that implements the Gatherer +// interface itself. Its Gather method calls Gather on all Gatherers in the +// slice in order and returns the merged results. Errors returned from the +// Gather calles are all returned in a flattened MultiError. Duplicate and +// inconsistent Metrics are skipped (first occurrence in slice order wins) and +// reported in the returned error. +// +// Gatherers can be used to merge the Gather results from multiple +// Registries. It also provides a way to directly inject existing MetricFamily +// protobufs into the gathering by creating a custom Gatherer with a Gather +// method that simply returns the existing MetricFamily protobufs. Note that no +// registration is involved (in contrast to Collector registration), so +// obviously registration-time checks cannot happen. Any inconsistencies between +// the gathered MetricFamilies are reported as errors by the Gather method, and +// inconsistent Metrics are dropped. 
Invalid parts of the MetricFamilies +// (e.g. syntactically invalid metric or label names) will go undetected. +type Gatherers []Gatherer + +// Gather implements Gatherer. +func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { + var ( + metricFamiliesByName = map[string]*dto.MetricFamily{} + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + errs MultiError // The collected errors to return in the end. + ) + + for i, g := range gs { + mfs, err := g.Gather() + if err != nil { + if multiErr, ok := err.(MultiError); ok { + for _, err := range multiErr { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } else { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } + for _, mf := range mfs { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if exists { + if existingMF.GetHelp() != mf.GetHelp() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has help %q but should have %q", + mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), + )) + continue + } + if existingMF.GetType() != mf.GetType() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has type %s but should have %s", + mf.GetName(), mf.GetType(), existingMF.GetType(), + )) + continue + } + } else { + existingMF = &dto.MetricFamily{} + existingMF.Name = mf.Name + existingMF.Help = mf.Help + existingMF.Type = mf.Type + metricFamiliesByName[mf.GetName()] = existingMF + } + for _, m := range mf.Metric { + if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { + errs = append(errs, err) + continue + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. + if s[i].TimestampMs == nil { + return false + } + if s[j].TimestampMs == nil { + return true + } + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// normalizeMetricFamilies returns a MetricFamily slice whith empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. 
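To make the Gatherers type above concrete, here is a hedged sketch that merges a hypothetical application-local registry with the DefaultGatherer and gathers the combined result once:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    appReg := prometheus.NewRegistry() // assumed application-local registry

    // A Registry implements Gatherer, so it can be combined with the default one.
    merged := prometheus.Gatherers{prometheus.DefaultGatherer, appReg}

    mfs, err := merged.Gather() // duplicates and inconsistencies come back via err
    if err != nil {
        log.Println("gather:", err)
    }
    for _, mf := range mfs {
        fmt.Println(mf.GetName())
    }
}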
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) + } + } + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result +} + +// checkMetricConsistency checks if the provided Metric is consistent with the +// provided MetricFamily. It also hashes the Metric labels and the MetricFamily +// name. If the resulting hash is already in the provided metricHashes, an error +// is returned. If not, it is added to metricHashes. The provided dimHashes maps +// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes +// doesn't yet contain a hash for the provided MetricFamily, it is +// added. Otherwise, an error is returned if the existing dimHash is not equal to +// the calculated dimHash. +func checkMetricConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + metricHashes map[uint64]struct{}, + dimHashes map[string]uint64, +) error { + // Type consistency with metric family. + if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || + metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || + metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || + metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || + metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s is not a %s", + metricFamily.GetName(), dtoMetric, metricFamily.GetType(), + ) + } + + // Is the metric unique (i.e. no other metric with the same name and the same label values)? + h := hashNew() + h = hashAdd(h, metricFamily.GetName()) + h = hashAddByte(h, separatorByte) + dh := hashNew() + // Make sure label pairs are sorted. We depend on it for the consistency + // check. + sort.Sort(LabelPairSorter(dtoMetric.Label)) + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + dh = hashAdd(dh, lp.GetName()) + dh = hashAddByte(dh, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %s %s was collected before with the same name and label values", + metricFamily.GetName(), dtoMetric, + ) + } + if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { + if dimHash != dh { + return fmt.Errorf( + "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", + metricFamily.GetName(), dtoMetric, + ) + } + } else { + dimHashes[metricFamily.GetName()] = dh + } + metricHashes[h] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 000000000..bce05bf9a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,534 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. + Observe(float64) +} + +// DefObjectives are the default Summary quantile values. +var ( + DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} + + errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, + ) +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. 
+ DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this + // Summary. Summaries with the same fully-qualified name must have the + // same label names in their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // SummaryVec. ConstLabels serve only special purposes. One is for the + // special case where the value of a label does not change during the + // lifetime of a process, e.g. if the revision of the running binary is + // put into a label. Another, more advanced purpose is if more than one + // Collector needs to collect Summaries with the same fully-qualified + // name. In that case, those Summaries must differ in the values of + // their ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels + + // Objectives defines the quantile rank estimates with their respective + // absolute error. If Objectives[q] = e, then the value reported + // for q will be the φ-quantile value for some φ between q-e and q+e. + // The default value is DefObjectives. + Objectives map[float64]float64 + + // MaxAge defines the duration for which an observation stays relevant + // for the summary. Must be positive. The default value is DefMaxAge. + MaxAge time.Duration + + // AgeBuckets is the number of buckets used to exclude observations that + // are older than MaxAge from the summary. A higher number has a + // resource penalty, so only increase it if the higher resolution is + // really required. For very high observation rates, you might want to + // reduce the number of age buckets. With only one age bucket, you will + // effectively see a complete reset of the summary each time MaxAge has + // passed. The default value is DefAgeBuckets. + AgeBuckets uint32 + + // BufCap defines the default sample stream buffer size. The default + // value of DefBufCap should suffice for most uses. If there is a need + // to increase the value, a multiple of 500 is recommended (because that + // is the internal buffer size of the underlying package + // "github.com/bmizerany/perks/quantile"). + BufCap uint32 +} + +// Great fuck-up with the sliding-window decay algorithm... The Merge method of +// perk/quantile is actually not working as advertised - and it might be +// unfixable, as the underlying algorithm is apparently not capable of merging +// summaries in the first place. 
To avoid using Merge, we are currently adding +// observations to _each_ age bucket, i.e. the effort to add a sample is +// essentially multiplied by the number of age buckets. When rotating age +// buckets, we empty the previous head stream. On scrape time, we simply take +// the quantiles from the head stream (no merging required). Result: More effort +// on observation time, less effort on scrape time, which is exactly the +// opposite of what we try to accomplish, but at least the results are correct. +// +// The quite elegant previous contraption to merge the age buckets efficiently +// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) +// can't be used anymore. + +// NewSummary creates a new Summary based on the provided SummaryOpts. +func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + + if len(opts.Objectives) == 0 { + opts.Objectives = DefObjectives + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + s := &summary{ + desc: desc, + + objectives: opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: makeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.init(s) // Init self-collection. + return s +} + +type summary struct { + selfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. + + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. 
+ s.swapBufs(time.Now()) + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + var q float64 + if s.headStream.Count() == 0 { + q = math.NaN() + } else { + q = s.headStream.Query(rank) + } + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. +func (s *summary) asyncFlush(now time.Time) { + s.mtx.Lock() + s.swapBufs(now) + + // Unblock the original goroutine that was responsible for the mutation + // that triggered the compaction. But hold onto the global non-buffer + // state mutex until the operation finishes. + go func() { + s.flushColdBuf() + s.mtx.Unlock() + }() +} + +// rotateStreams needs mtx AND bufMtx locked. +func (s *summary) maybeRotateStreams() { + for !s.hotBufExpTime.Equal(s.headStreamExpTime) { + s.headStream.Reset() + s.headStreamIdx++ + if s.headStreamIdx >= len(s.streams) { + s.headStreamIdx = 0 + } + s.headStream = s.streams[s.headStreamIdx] + s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) + } +} + +// flushColdBuf needs mtx locked. +func (s *summary) flushColdBuf() { + for _, v := range s.coldBuf { + for _, stream := range s.streams { + stream.Insert(v) + } + s.cnt++ + s.sum += v + } + s.coldBuf = s.coldBuf[0:0] + s.maybeRotateStreams() +} + +// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. +func (s *summary) swapBufs(now time.Time) { + if len(s.coldBuf) != 0 { + panic("coldBuf is not empty") + } + s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf + // hotBuf is now empty and gets new expiration set. + for now.After(s.hotBufExpTime) { + s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) + } +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { + return len(s) +} + +func (s quantSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { + return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. +type SummaryVec struct { + *MetricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Summary and not a +// Metric so that no type conversion is required. +func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) 
+ if metric != nil { + return metric.(Summary), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Summary and not a Metric so that no +// type conversion is required. +func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Summary), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { + return m.MetricVec.WithLabelValues(lvs...).(Summary) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *SummaryVec) With(labels Labels) Summary { + return m.MetricVec.With(labels).(Summary) +} + +type constSummary struct { + desc *Desc + count uint64 + sum float64 + quantiles map[float64]float64 + labelPairs []*dto.LabelPair +} + +func (s *constSummary) Desc() *Desc { + return s.desc +} + +func (s *constSummary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.quantiles)) + + sum.SampleCount = proto.Uint64(s.count) + sum.SampleSum = proto.Float64(s.sum) + + for rank, q := range s.quantiles { + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + + return nil +} + +// NewConstSummary returns a metric representing a Prometheus summary with fixed +// values for the count, sum, and quantiles. As those parameters cannot be +// changed, the returned value does not implement the Summary interface (but +// only the Metric interface). Users of this package will not have much use for +// it in regular operations. However, when implementing custom Collectors, it is +// useful as a throw-away metric that is generated on the fly to send it to +// Prometheus in the Collect method. +// +// quantiles maps ranks to quantile values. For example, a median latency of +// 0.23s and a 99th percentile latency of 0.56s would be expressed as: +// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// NewConstSummary returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. +func NewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstSummary is a version of NewConstSummary that panics where +// NewConstMetric would have returned an error. +func MustNewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) Metric { + m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) 
+ if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 000000000..5faf7e6e3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,138 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Untyped is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// An Untyped metric works the same as a Gauge. The only difference is that to +// no type information is implied. +// +// To create Untyped instances, use NewUntyped. +type Untyped interface { + Metric + Collector + + // Set sets the Untyped metric to an arbitrary value. + Set(float64) + // Inc increments the Untyped metric by 1. + Inc() + // Dec decrements the Untyped metric by 1. + Dec() + // Add adds the given value to the Untyped metric. (The value can be + // negative, resulting in a decrease.) + Add(float64) + // Sub subtracts the given value from the Untyped metric. (The value can + // be negative, resulting in an increase.) + Sub(float64) +} + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// NewUntyped creates a new Untyped metric from the provided UntypedOpts. +func NewUntyped(opts UntypedOpts) Untyped { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, 0) +} + +// UntypedVec is a Collector that bundles a set of Untyped metrics that all +// share the same Desc, but have different values for their variable +// labels. This is used if you want to count the same thing partitioned by +// various dimensions. Create instances with NewUntypedVec. +type UntypedVec struct { + *MetricVec +} + +// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &UntypedVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newValue(desc, UntypedValue, 0, lvs...) + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns an Untyped and not a +// Metric so that no type conversion is required. +func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. 
The +// difference is that this method returns an Untyped and not a Metric so that no +// type conversion is required. +func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { + return m.MetricVec.WithLabelValues(lvs...).(Untyped) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *UntypedVec) With(labels Labels) Untyped { + return m.MetricVec.With(labels).(Untyped) +} + +// UntypedFunc is an Untyped whose value is determined at collect time by +// calling a provided function. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 000000000..a944c3775 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,234 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "math" + "sort" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +// value is a generic metric for simple values. It implements Metric, Collector, +// Counter, Gauge, and Untyped. Its effective type is determined by +// ValueType. 
This is a low-level building block used by the library to back the +// implementations of Counter, Gauge, and Untyped. +type value struct { + // valBits containst the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + valType ValueType + labelPairs []*dto.LabelPair +} + +// newValue returns a newly allocated value with the given Desc, ValueType, +// sample value and label values. It panics if the number of label +// values is different from the number of variable labels in Desc. +func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { + if len(labelValues) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &value{ + desc: desc, + valType: valueType, + valBits: math.Float64bits(val), + labelPairs: makeLabelPairs(desc, labelValues), + } + result.init(result) + return result +} + +func (v *value) Desc() *Desc { + return v.desc +} + +func (v *value) Set(val float64) { + atomic.StoreUint64(&v.valBits, math.Float64bits(val)) +} + +func (v *value) Inc() { + v.Add(1) +} + +func (v *value) Dec() { + v.Add(-1) +} + +func (v *value) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&v.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { + return + } + } +} + +func (v *value) Sub(val float64) { + v.Add(val * -1) +} + +func (v *value) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) + return populateMetric(v.valType, val, v.labelPairs, out) +} + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc. 
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. + return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + for _, lp := range desc.constLabelPairs { + labelPairs = append(labelPairs, lp) + } + sort.Sort(LabelPairSorter(labelPairs)) + return labelPairs +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 000000000..7f3eef9a4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,404 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// MetricVec is a Collector to bundle metrics of the same name that +// differ in their label values. MetricVec is usually not used directly but as a +// building block for implementations of vectors of a given metric +// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already +// provided in this package. +type MetricVec struct { + mtx sync.RWMutex // Protects the children. 
+ children map[uint64][]metricWithLabelValues + desc *Desc + + newMetric func(labelValues ...string) Metric + hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling + hashAddByte func(h uint64, b byte) uint64 +} + +// newMetricVec returns an initialized MetricVec. The concrete value is +// returned for embedding into another struct. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { + return &MetricVec{ + children: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + hashAdd: hashAdd, + hashAddByte: hashAddByte, + } +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// Describe implements Collector. The length of the returned slice +// is always one. +func (m *MetricVec) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *MetricVec) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.children { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// GetMetricWithLabelValues returns the Metric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Metric is created. +// +// It is possible to call this method without using the returned Metric to only +// create the new Metric but leave it at its start value (e.g. a Summary or +// Histogram without any observations). See also the SummaryVec example. +// +// Keeping the Metric for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Metric from the MetricVec. In that case, the +// Metric will still exist, but it will not be exported anymore, even if a +// Metric with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.getOrCreateMetricWithLabelValues(h, lvs), nil +} + +// GetMetricWith returns the Metric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Metric is created. Implications of +// creating a Metric without using it and keeping the Metric for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. 
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.getOrCreateMetricWithLabels(h, labels), nil +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics if an error +// occurs. The method allows neat syntax like: +// httpReqs.WithLabelValues("404", "POST").Inc() +func (m *MetricVec) WithLabelValues(lvs ...string) Metric { + metric, err := m.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return metric +} + +// With works as GetMetricWith, but panics if an error occurs. The method allows +// neat syntax like: +// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() +func (m *MetricVec) With(labels Labels) Metric { + metric, err := m.GetMetricWith(labels) + if err != nil { + panic(err) + } + return metric +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual Metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + return m.deleteByHashWithLabelValues(h, lvs) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in the Desc of the MetricVec. However, such +// inconsistent Labels can never match an actual Metric, so the method will +// always return false in that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *MetricVec) Delete(labels Labels) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.deleteByHashWithLabels(h, labels) +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. +func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { + metrics, ok := m.children[h] + if !ok { + return false + } + + i := m.findMetricWithLabelValues(metrics, lvs) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.children[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.children, h) + } + return true +} + +// deleteByHashWithLabels removes the metric from the hash bucket h. If there +// are multiple matches in the bucket, use lvs to select a metric and remove +// only that metric. 
+func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool { + metrics, ok := m.children[h] + if !ok { + return false + } + i := m.findMetricWithLabels(metrics, labels) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.children[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.children, h) + } + return true +} + +// Reset deletes all metrics in this vector. +func (m *MetricVec) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.children { + delete(m.children, h) + } +} + +func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { + if len(vals) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + h := hashNew() + for _, val := range vals { + h = m.hashAdd(h, val) + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { + if len(labels) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + h := hashNew() + for _, label := range m.desc.variableLabels { + val, ok := labels[label] + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithLabelValues(hash, lvs) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithLabelValues(hash, lvs) + if !ok { + // Copy to avoid allocation in case wo don't go down this code path. + copiedLVs := make([]string, len(lvs)) + copy(copiedLVs, lvs) + metric = m.newMetric(copiedLVs...) + m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric}) + } + return metric +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithLabels(hash, labels) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithLabels(hash, labels) + if !ok { + lvs := m.extractLabelValues(labels) + metric = m.newMetric(lvs...) + m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric}) + } + return metric +} + +// getMetricWithLabelValues gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. +func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) { + metrics, ok := m.children[h] + if ok { + if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// getMetricWithLabels gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. 
+func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) { + metrics, ok := m.children[h] + if ok { + if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// findMetricWithLabelValues returns the index of the matching metric or +// len(metrics) if not found. +func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int { + for i, metric := range metrics { + if m.matchLabelValues(metric.values, lvs) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. +func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { + for i, metric := range metrics { + if m.matchLabels(metric.values, labels) { + return i + } + } + return len(metrics) +} + +func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool { + if len(values) != len(lvs) { + return false + } + for i, v := range values { + if v != lvs[i] { + return false + } + } + return true +} + +func (m *MetricVec) matchLabels(values []string, labels Labels) bool { + if len(labels) != len(values) { + return false + } + for i, k := range m.desc.variableLabels { + if values[i] != labels[k] { + return false + } + } + return true +} + +func (m *MetricVec) extractLabelValues(labels Labels) []string { + labelValues := make([]string, len(labels)) + for i, k := range m.desc.variableLabels { + labelValues[i] = labels[k] + } + return labelValues +} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 000000000..20110e410 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 000000000..b065f8683 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,364 @@ +// Code generated by protoc-gen-go. +// source: metrics.proto +// DO NOT EDIT! + +/* +Package io_prometheus_client is a generated protocol buffer package. 
+ +It is generated from these files: + metrics.proto + +It has these top-level messages: + LabelPair + Gauge + Counter + Quantile + Summary + Untyped + Histogram + Bucket + Metric + MetricFamily +*/ +package io_prometheus_client + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Quantile []*Quantile 
`protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + 
return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) +} diff --git a/vendor/github.com/prometheus/client_model/ruby/LICENSE b/vendor/github.com/prometheus/client_model/ruby/LICENSE new file mode 100644 index 000000000..11069edd7 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/ruby/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 000000000..636a2c1a5 --- /dev/null +++ b/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go new file mode 100644 index 000000000..c092723e8 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -0,0 +1,429 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "math" + "mime" + "net/http" + + dto "github.com/prometheus/client_model/go" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/model" +) + +// Decoder types decode an input stream into metric families. +type Decoder interface { + Decode(*dto.MetricFamily) error +} + +// DecodeOptions contains options used by the Decoder and in sample extraction. +type DecodeOptions struct { + // Timestamp is added to each value from the stream that has no explicit timestamp set. + Timestamp model.Time +} + +// ResponseFormat extracts the correct format from a HTTP response header. +// If no matching format can be found FormatUnknown is returned. +func ResponseFormat(h http.Header) Format { + ct := h.Get(hdrContentType) + + mediatype, params, err := mime.ParseMediaType(ct) + if err != nil { + return FmtUnknown + } + + const textType = "text/plain" + + switch mediatype { + case ProtoType: + if p, ok := params["proto"]; ok && p != ProtoProtocol { + return FmtUnknown + } + if e, ok := params["encoding"]; ok && e != "delimited" { + return FmtUnknown + } + return FmtProtoDelim + + case textType: + if v, ok := params["version"]; ok && v != TextVersion { + return FmtUnknown + } + return FmtText + } + + return FmtUnknown +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { + switch format { + case FmtProtoDelim: + return &protoDecoder{r: r} + } + return &textDecoder{r: r} +} + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { + r io.Reader +} + +// Decode implements the Decoder interface. 
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error { + _, err := pbutil.ReadDelimited(d.r, v) + if err != nil { + return err + } + if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + return fmt.Errorf("invalid metric name %q", v.GetName()) + } + for _, m := range v.GetMetric() { + if m == nil { + continue + } + for _, l := range m.GetLabel() { + if l == nil { + continue + } + if !model.LabelValue(l.GetValue()).IsValid() { + return fmt.Errorf("invalid label value %q", l.GetValue()) + } + if !model.LabelName(l.GetName()).IsValid() { + return fmt.Errorf("invalid label name %q", l.GetName()) + } + } + } + return nil +} + +// textDecoder implements the Decoder interface for the text protocol. +type textDecoder struct { + r io.Reader + p TextParser + fams []*dto.MetricFamily +} + +// Decode implements the Decoder interface. +func (d *textDecoder) Decode(v *dto.MetricFamily) error { + // TODO(fabxc): Wrap this as a line reader to make streaming safer. + if len(d.fams) == 0 { + // No cached metric families, read everything and parse metrics. + fams, err := d.p.TextToMetricFamilies(d.r) + if err != nil { + return err + } + if len(fams) == 0 { + return io.EOF + } + d.fams = make([]*dto.MetricFamily, 0, len(fams)) + for _, f := range fams { + d.fams = append(d.fams, f) + } + } + + *v = *d.fams[0] + d.fams = d.fams[1:] + + return nil +} + +// SampleDecoder wraps a Decoder to extract samples from the metric families +// decoded by the wrapped Decoder. +type SampleDecoder struct { + Dec Decoder + Opts *DecodeOptions + + f dto.MetricFamily +} + +// Decode calls the Decode method of the wrapped Decoder and then extracts the +// samples from the decoded MetricFamily into the provided model.Vector. +func (sd *SampleDecoder) Decode(s *model.Vector) error { + err := sd.Dec.Decode(&sd.f) + if err != nil { + return err + } + *s, err = extractSamples(&sd.f, sd.Opts) + return err +} + +// ExtractSamples builds a slice of samples from the provided metric +// families. If an error occurrs during sample extraction, it continues to +// extract from the remaining metric families. The returned error is the last +// error that has occurred. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { + var ( + all model.Vector + lastErr error + ) + for _, f := range fams { + some, err := extractSamples(f, o) + if err != nil { + lastErr = err + continue + } + all = append(all, some...) 
+ } + return all, lastErr +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f), nil + case dto.MetricType_GAUGE: + return extractGauge(o, f), nil + case dto.MetricType_SUMMARY: + return extractSummary(o, f), nil + case dto.MetricType_UNTYPED: + return extractUntyped(o, f), nil + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f), nil + } + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". 
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. 
+ lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 000000000..11839ed65 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,88 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +type encoder func(*dto.MetricFamily) error + +func (e encoder) Encode(v *dto.MetricFamily) error { + return e(v) +} + +// Negotiate returns the Content-Type based on the given Accept header. +// If no appropriate accepted type is found, FmtText is returned. +func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + // Check for protocol buffer + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + // Check for text format. + ver := ac.Params["version"] + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. 
+func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoder(func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }) + case FmtProtoCompact: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }) + case FmtProtoText: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }) + case FmtText: + return encoder(func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }) + } + panic("expfmt.NewEncoder: unknown format") +} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 000000000..c71bcb981 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. +type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 000000000..dc2eedeef --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Build only when actually fuzzing +// +build gofuzz + +package expfmt + +import "bytes" + +// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: +// +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// +// Further input samples should go in the folder fuzz/corpus. +func Fuzz(in []byte) int { + parser := TextParser{} + _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) + + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go new file mode 100644 index 000000000..f11321cd0 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -0,0 +1,303 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "math" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" +) + +// MetricFamilyToText converts a MetricFamily proto message into text format and +// writes the resulting lines to 'out'. It returns the number of bytes written +// and any error encountered. The output will have the same order as the input, +// no further sorting is performed. Furthermore, this function assumes the input +// is already sanitized and does not perform any sanity checks. If the input +// contains duplicate metrics or invalid metric or label names, the conversion +// will result in invalid text format output. +// +// This method fulfills the type 'prometheus.encoder'. +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { + var written int + + // Fail-fast checks. + if len(in.Metric) == 0 { + return written, fmt.Errorf("MetricFamily has no metrics: %s", in) + } + name := in.GetName() + if name == "" { + return written, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Comments, first HELP, then TYPE. + if in.Help != nil { + n, err := fmt.Fprintf( + out, "# HELP %s %s\n", + name, escapeString(*in.Help, false), + ) + written += n + if err != nil { + return written, err + } + } + metricType := in.GetType() + n, err := fmt.Fprintf( + out, "# TYPE %s %s\n", + name, strings.ToLower(metricType.String()), + ) + written += n + if err != nil { + return written, err + } + + // Finally the samples, one line for each. 
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Counter.GetValue(), + out, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Gauge.GetValue(), + out, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Untyped.GetValue(), + out, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + name, metric, + model.QuantileLabel, fmt.Sprint(q.GetQuantile()), + q.GetValue(), + out, + ) + written += n + if err != nil { + return written, err + } + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Summary.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Summary.GetSampleCount()), + out, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, q := range metric.Histogram.Bucket { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, fmt.Sprint(q.GetUpperBound()), + float64(q.GetCumulativeCount()), + out, + ) + written += n + if err != nil { + return written, err + } + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, "+Inf", + float64(metric.Histogram.GetSampleCount()), + out, + ) + if err != nil { + return written, err + } + written += n + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Histogram.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Histogram.GetSampleCount()), + out, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeSample writes a single sample in text format to out, given the metric +// name, the metric proto message itself, optionally an additional label name +// and value (use empty strings if not required), and the value. The function +// returns the number of bytes written and any error encountered. 
+func writeSample( + name string, + metric *dto.Metric, + additionalLabelName, additionalLabelValue string, + value float64, + out io.Writer, +) (int, error) { + var written int + n, err := fmt.Fprint(out, name) + written += n + if err != nil { + return written, err + } + n, err = labelPairsToText( + metric.Label, + additionalLabelName, additionalLabelValue, + out, + ) + written += n + if err != nil { + return written, err + } + n, err = fmt.Fprintf(out, " %v", value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + n, err = out.Write([]byte{'\n'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +// labelPairsToText converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'out'. An empty slice in combination with an +// empty string 'additionalLabelName' results in nothing being +// written. Otherwise, the label pairs are written, escaped as required by the +// text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. +func labelPairsToText( + in []*dto.LabelPair, + additionalLabelName, additionalLabelValue string, + out io.Writer, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var written int + separator := '{' + for _, lp := range in { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, lp.GetName(), escapeString(lp.GetValue(), true), + ) + written += n + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, additionalLabelName, + escapeString(additionalLabelValue, true), + ) + written += n + if err != nil { + return written, err + } + } + n, err := out.Write([]byte{'}'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +var ( + escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) + escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +// escapeString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. +func escapeString(v string, includeDoubleQuote bool) string { + if includeDoubleQuote { + return escapeWithDoubleQuote.Replace(v) + } + + return escape.Replace(v) +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 000000000..b86290afa --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,757 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// zero value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. + lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. + currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. +// +// This method must not be called concurrently. 
If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.) + if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. 
+ return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. +func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + if !model.LabelValue(p.currentToken.String()).IsValid() { + p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. 
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := strconv.ParseFloat(p.currentToken.String(), 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). +func (p *TextParser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) + return nil + } + p.currentMetric.TimestampMs = proto.Int64(timestamp) + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentToken.Len() > 0 { + p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) + return nil + } + return p.startOfLine +} + +// readingHelp represents the state where the last byte read (now in +// p.currentByte) is the first byte of the docstring after 'HELP'. +func (p *TextParser) readingHelp() stateFn { + if p.currentMF.Help != nil { + p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) + return nil + } + // Rest of line is the docstring. + if p.readTokenUntilNewline(true); p.err != nil { + return nil // Unexpected end of input. + } + p.currentMF.Help = proto.String(p.currentToken.String()) + return p.startOfLine +} + +// readingType represents the state where the last byte read (now in +// p.currentByte) is the first byte of the type hint after 'HELP'. +func (p *TextParser) readingType() stateFn { + if p.currentMF.Type != nil { + p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) + return nil + } + // Rest of line is the type. + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] + if !ok { + p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) + return nil + } + p.currentMF.Type = dto.MetricType(metricType).Enum() + return p.startOfLine +} + +// parseError sets p.err to a ParseError at the current line with the given +// message. +func (p *TextParser) parseError(msg string) { + p.err = ParseError{ + Line: p.lineCount, + Msg: msg, + } +} + +// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte +// that is neither ' ' nor '\t'. That byte is left in p.currentByte. +func (p *TextParser) skipBlankTab() { + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { + return + } + } +} + +// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do +// anything if p.currentByte is neither ' ' nor '\t'. +func (p *TextParser) skipBlankTabIfCurrentBlankTab() { + if isBlankOrTab(p.currentByte) { + p.skipBlankTab() + } +} + +// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The +// first byte considered is the byte already read (now in p.currentByte). The +// first whitespace byte encountered is still copied into p.currentByte, but not +// into p.currentToken. +func (p *TextParser) readTokenUntilWhitespace() { + p.currentToken.Reset() + for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first +// byte considered is the byte already read (now in p.currentByte). The first +// newline byte encountered is still copied into p.currentByte, but not into +// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are +// recognized: '\\' translates into '\', and '\n' into a line-feed character. +// All other escape sequences are invalid and cause an error. 
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { + p.currentToken.Reset() + escaped := false + for p.err == nil { + if recognizeEscapeSequence && escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '\n': + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 000000000..648b38cb6 --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. 
+func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 000000000..35e739c7a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "time" +) + +type AlertStatus string + +const ( + AlertFiring AlertStatus = "firing" + AlertResolved AlertStatus = "resolved" +) + +// Alert is a generic representation of an alert in the Prometheus eco-system. +type Alert struct { + // Label value pairs for purpose of aggregation, matching, and disposition + // dispatching. This must minimally include an "alertname" label. + Labels LabelSet `json:"labels"` + + // Extra key/value information which does not define alert identity. + Annotations LabelSet `json:"annotations"` + + // The known time range for this alert. Both ends are optional. + StartsAt time.Time `json:"startsAt,omitempty"` + EndsAt time.Time `json:"endsAt,omitempty"` + GeneratorURL string `json:"generatorURL"` +} + +// Name returns the name of the alert. It is equivalent to the "alertname" label. +func (a *Alert) Name() string { + return string(a.Labels[AlertNameLabel]) +} + +// Fingerprint returns a unique hash for the alert. It is equivalent to +// the fingerprint of the alert's label set. +func (a *Alert) Fingerprint() Fingerprint { + return a.Labels.Fingerprint() +} + +func (a *Alert) String() string { + s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) + if a.Resolved() { + return s + "[resolved]" + } + return s + "[active]" +} + +// Resolved returns true iff the activity interval ended in the past. +func (a *Alert) Resolved() bool { + return a.ResolvedAt(time.Now()) +} + +// ResolvedAt returns true off the activity interval ended before +// the given timestamp. +func (a *Alert) ResolvedAt(ts time.Time) bool { + if a.EndsAt.IsZero() { + return false + } + return !a.EndsAt.After(ts) +} + +// Status returns the status of the alert. +func (a *Alert) Status() AlertStatus { + if a.Resolved() { + return AlertResolved + } + return AlertFiring +} + +// Validate checks whether the alert data is inconsistent. 
+func (a *Alert) Validate() error { + if a.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if err := a.Labels.Validate(); err != nil { + return fmt.Errorf("invalid label set: %s", err) + } + if len(a.Labels) == 0 { + return fmt.Errorf("at least one label pair required") + } + if err := a.Annotations.Validate(); err != nil { + return fmt.Errorf("invalid annotations: %s", err) + } + return nil +} + +// Alert is a list of alerts that can be sorted in chronological order. +type Alerts []*Alert + +func (as Alerts) Len() int { return len(as) } +func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } + +func (as Alerts) Less(i, j int) bool { + if as[i].StartsAt.Before(as[j].StartsAt) { + return true + } + if as[i].EndsAt.Before(as[j].EndsAt) { + return true + } + return as[i].Fingerprint() < as[j].Fingerprint() +} + +// HasFiring returns true iff one of the alerts is not resolved. +func (as Alerts) HasFiring() bool { + for _, a := range as { + if !a.Resolved() { + return true + } + } + return false +} + +// Status returns StatusFiring iff at least one of the alerts is firing. +func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go new file mode 100644 index 000000000..fc4de4106 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fingerprinting.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. 
+type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). +func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. +func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go new file mode 100644 index 000000000..038fc1c90 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go new file mode 100644 index 000000000..41051a01a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -0,0 +1,210 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + // AlertNameLabel is the name of the label containing the an alert's name. + AlertNameLabel = "alertname" + + // ExportedLabelPrefix is the prefix to prepend to the label names present in + // exported metrics if a label of the same name is added by the server. 
+ ExportedLabelPrefix = "exported_" + + // MetricNameLabel is the label name indicating the metric name of a + // timeseries. + MetricNameLabel = "__name__" + + // SchemeLabel is the name of the label that holds the scheme on which to + // scrape a target. + SchemeLabel = "__scheme__" + + // AddressLabel is the name of the label that holds the address of + // a scrape target. + AddressLabel = "__address__" + + // MetricsPathLabel is the name of the label that holds the path on which to + // scrape a target. + MetricsPathLabel = "__metrics_path__" + + // ReservedLabelPrefix is a prefix which is not legal in user-supplied + // label names. + ReservedLabelPrefix = "__" + + // MetaLabelPrefix is a prefix for labels that provide meta information. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. + MetaLabelPrefix = "__meta_" + + // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. This is reserved for use in + // Prometheus configuration files by users. + TmpLabelPrefix = "__tmp_" + + // ParamLabelPrefix is a prefix for labels that provide URL parameters + // used to scrape a target. + ParamLabelPrefix = "__param_" + + // JobLabel is the label name indicating the job from which a timeseries + // was scraped. + JobLabel = "job" + + // InstanceLabel is the label name used for the instance label. + InstanceLabel = "instance" + + // BucketLabel is used for the label that defines the upper bound of a + // bucket of a histogram ("le" -> "less or equal"). + BucketLabel = "le" + + // QuantileLabel is used for the label that defines the quantile in a + // summary. + QuantileLabel = "quantile" +) + +// LabelNameRE is a regular expression matching valid label names. Note that the +// IsValid method of LabelName performs the same check but faster than a match +// with this regular expression. +var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") + +// A LabelName is a key for a LabelSet or Metric. It has a value associated +// therewith. +type LabelName string + +// IsValid is true iff the label name matches the pattern of LabelNameRE. This +// method, however, does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValid() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (ln *LabelName) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// LabelNames is a sortable LabelName slice. In implements sort.Interface. 
+type LabelNames []LabelName + +func (l LabelNames) Len() int { + return len(l) +} + +func (l LabelNames) Less(i, j int) bool { + return l[i] < l[j] +} + +func (l LabelNames) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l LabelNames) String() string { + labelStrings := make([]string, 0, len(l)) + for _, label := range l { + labelStrings = append(labelStrings, string(label)) + } + return strings.Join(labelStrings, ", ") +} + +// A LabelValue is an associated value for a LabelName. +type LabelValue string + +// IsValid returns true iff the string is a valid UTF8. +func (lv LabelValue) IsValid() bool { + return utf8.ValidString(string(lv)) +} + +// LabelValues is a sortable LabelValue slice. It implements sort.Interface. +type LabelValues []LabelValue + +func (l LabelValues) Len() int { + return len(l) +} + +func (l LabelValues) Less(i, j int) bool { + return string(l[i]) < string(l[j]) +} + +func (l LabelValues) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// LabelPair pairs a name with a value. +type LabelPair struct { + Name LabelName + Value LabelValue +} + +// LabelPairs is a sortable slice of LabelPair pointers. It implements +// sort.Interface. +type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 000000000..6eda08a73 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. 
+func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. + sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. +func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 000000000..f7250909b --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,103 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + separator = []byte{0} + // MetricNameRE is a regular expression matching valid metric + // names. Note that the IsValidMetricName function performs the same + // check but faster than a match with this regular expression. + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. +func (m Metric) Clone() Metric { + clone := make(Metric, len(m)) + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 000000000..a7b969170 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 000000000..8762b13c6 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sort" +) + +// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is +// used to separate label names, label values, and other strings from each other +// when calculating their combined hash value (aka signature aka fingerprint). +const SeparatorByte byte = 255 + +var ( + // cache the signature of an empty label set. + emptyLabelSignature = hashNew() +) + +// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a +// given label set. (Collisions are possible but unlikely if the number of label +// sets the function is applied to is small.) +func LabelsToSignature(labels map[string]string) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + labelNames := make([]string, 0, len(labels)) + for labelName := range labels { + labelNames = append(labelNames, labelName) + } + sort.Strings(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, labelName) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, labels[labelName]) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as +// parameter (rather than a label map) and returns a Fingerprint. +func labelSetToFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + labelNames := make(LabelNames, 0, len(ls)) + for labelName := range ls { + labelNames = append(labelNames, labelName) + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(ls[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return Fingerprint(sum) +} + +// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a +// faster and less allocation-heavy hash function, which is more susceptible to +// create hash collisions. Therefore, collision detection should be applied. 
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + var result uint64 + for labelName, labelValue := range ls { + sum := hashNew() + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(labelValue)) + result ^= sum + } + return Fingerprint(result) +} + +// SignatureForLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and only includes the labels with the +// specified LabelNames into the signature calculation. The labels passed in +// will be sorted by this function. +func SignatureForLabels(m Metric, labels ...LabelName) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + sort.Sort(LabelNames(labels)) + + sum := hashNew() + for _, label := range labels { + sum = hashAdd(sum, string(label)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[label])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and excludes the labels with any of the +// specified LabelNames from the signature calculation. +func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { + if len(m) == 0 { + return emptyLabelSignature + } + + labelNames := make(LabelNames, 0, len(m)) + for labelName := range m { + if _, exclude := labels[labelName]; !exclude { + labelNames = append(labelNames, labelName) + } + } + if len(labelNames) == 0 { + return emptyLabelSignature + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go new file mode 100644 index 000000000..bb99889d2 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "time" +) + +// Matcher describes a matches the value of a given label. +type Matcher struct { + Name LabelName `json:"name"` + Value string `json:"value"` + IsRegex bool `json:"isRegex"` +} + +func (m *Matcher) UnmarshalJSON(b []byte) error { + type plain Matcher + if err := json.Unmarshal(b, (*plain)(m)); err != nil { + return err + } + + if len(m.Name) == 0 { + return fmt.Errorf("label name in matcher must not be empty") + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return err + } + } + return nil +} + +// Validate returns true iff all fields of the matcher have valid values. 
+func (m *Matcher) Validate() error { + if !m.Name.IsValid() { + return fmt.Errorf("invalid name %q", m.Name) + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return fmt.Errorf("invalid regular expression %q", m.Value) + } + } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { + return fmt.Errorf("invalid value %q", m.Value) + } + return nil +} + +// Silence defines the representation of a silence definition in the Prometheus +// eco-system. +type Silence struct { + ID uint64 `json:"id,omitempty"` + + Matchers []*Matcher `json:"matchers"` + + StartsAt time.Time `json:"startsAt"` + EndsAt time.Time `json:"endsAt"` + + CreatedAt time.Time `json:"createdAt,omitempty"` + CreatedBy string `json:"createdBy"` + Comment string `json:"comment,omitempty"` +} + +// Validate returns true iff all fields of the silence have valid values. +func (s *Silence) Validate() error { + if len(s.Matchers) == 0 { + return fmt.Errorf("at least one matcher required") + } + for _, m := range s.Matchers { + if err := m.Validate(); err != nil { + return fmt.Errorf("invalid matcher: %s", err) + } + } + if s.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if s.EndsAt.IsZero() { + return fmt.Errorf("end time missing") + } + if s.EndsAt.Before(s.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if s.CreatedBy == "" { + return fmt.Errorf("creator information missing") + } + if s.Comment == "" { + return fmt.Errorf("comment missing") + } + if s.CreatedAt.IsZero() { + return fmt.Errorf("creation timestamp missing") + } + return nil +} diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go new file mode 100644 index 000000000..74ed5a9f7 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/time.go @@ -0,0 +1,264 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +const ( + // MinimumTick is the minimum supported time resolution. This has to be + // at least time.Second in order for the code below to work. + minimumTick = time.Millisecond + // second is the Time duration equivalent to one second. + second = int64(time.Second / minimumTick) + // The number of nanoseconds per minimum tick. + nanosPerTick = int64(minimumTick / time.Nanosecond) + + // Earliest is the earliest Time representable. Handy for + // initializing a high watermark. + Earliest = Time(math.MinInt64) + // Latest is the latest Time representable. Handy for initializing + // a low watermark. + Latest = Time(math.MaxInt64) +) + +// Time is the number of milliseconds since the epoch +// (1970-01-01 00:00 UTC) excluding leap seconds. +type Time int64 + +// Interval describes and interval between two timestamps. +type Interval struct { + Start, End Time +} + +// Now returns the current time as a Time. 
+func Now() Time { + return TimeFromUnixNano(time.Now().UnixNano()) +} + +// TimeFromUnix returns the Time equivalent to the Unix Time t +// provided in seconds. +func TimeFromUnix(t int64) Time { + return Time(t * second) +} + +// TimeFromUnixNano returns the Time equivalent to the Unix Time +// t provided in nanoseconds. +func TimeFromUnixNano(t int64) Time { + return Time(t / nanosPerTick) +} + +// Equal reports whether two Times represent the same instant. +func (t Time) Equal(o Time) bool { + return t == o +} + +// Before reports whether the Time t is before o. +func (t Time) Before(o Time) bool { + return t < o +} + +// After reports whether the Time t is after o. +func (t Time) After(o Time) bool { + return t > o +} + +// Add returns the Time t + d. +func (t Time) Add(d time.Duration) Time { + return t + Time(d/minimumTick) +} + +// Sub returns the Duration t - o. +func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + *t = Time(v + va) + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. +type Duration time.Duration + +// Set implements pflag/flag.Value +func (d *Duration) Set(s string) error { + var err error + *d, err = ParseDuration(s) + return err +} + +// Type implements pflag.Value +func (d *Duration) Type() string { + return "duration" +} + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// ParseDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h. 
+func ParseDuration(durationStr string) (Duration, error) { + matches := durationRE.FindStringSubmatch(durationStr) + if len(matches) != 3 { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + } + return Duration(dur), nil +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" + ) + if ms == 0 { + return "0s" + } + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, + } + + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 000000000..c9d8fb1a2 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,416 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} + + // ZeroSample is the pseudo zero-value of Sample used to signal a + // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, + // and metric nil. Note that the natural zero value of Sample has a timestamp + // of 0, which is possible to appear in a real Sample and thus not suitable + // to signal a non-existing Sample. 
+ ZeroSample = Sample{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. +type Sample struct { + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. The +// semantics of value equality is defined by SampleValue.Equal. +func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + + return s.Value.Equal(o.Value) +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (s *Sample) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + s.Metric = v.Metric + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + + return nil +} + +// Samples is a sortable Sample slice. It implements sort.Interface. +type Samples []*Sample + +func (s Samples) Len() int { + return len(s) +} + +// Less compares first the metrics, then the timestamp. +func (s Samples) Less(i, j int) bool { + switch { + case s[i].Metric.Before(s[j].Metric): + return true + case s[j].Metric.Before(s[i].Metric): + return false + case s[i].Timestamp.Before(s[j].Timestamp): + return true + default: + return false + } +} + +func (s Samples) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Equal compares two sets of samples and returns true if they are equal. +func (s Samples) Equal(o Samples) bool { + if len(s) != len(o) { + return false + } + + for i, sample := range s { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// SampleStream is a stream of Values belonging to an attached COWMetric. +type SampleStream struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` +} + +func (ss SampleStream) String() string { + vals := make([]string, len(ss.Values)) + for i, v := range ss.Values { + vals[i] = v.String() + } + return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) +} + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. +func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} + +// Scalar is a scalar value evaluated at the set timestamp. +type Scalar struct { + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s Scalar) String() string { + return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) +} + +// MarshalJSON implements json.Marshaler. +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) + return json.Marshal([...]interface{}{s.Timestamp, string(v)}) +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. +type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE new file mode 100644 index 000000000..53c5e9aa1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go new file mode 100644 index 000000000..d3a826807 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -0,0 +1,95 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// NewBuddyInfo reads the buddyinfo statistics. +func NewBuddyInfo() ([]BuddyInfo, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewBuddyInfo() +} + +// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 000000000..e2acd6d40 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. +// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 000000000..b6c6b2ce1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,82 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "path" + + "github.com/prometheus/procfs/nfs" + "github.com/prometheus/procfs/xfs" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. +type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path returns the path of the given subsystem relative to the procfs root. +func (fs FS) Path(p ...string) string { + return path.Join(append([]string{string(fs)}, p...)...) +} + +// XFSStats retrieves XFS filesystem runtime statistics. +func (fs FS) XFSStats() (*xfs.Stats, error) { + f, err := os.Open(fs.Path("fs/xfs/stat")) + if err != nil { + return nil, err + } + defer f.Close() + + return xfs.ParseStats(f) +} + +// NFSClientRPCStats retrieves NFS client RPC statistics. +func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfs")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseClientRPCStats(f) +} + +// NFSdServerRPCStats retrieves NFS daemon RPC statistics. +func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfsd")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseServerRPCStats(f) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go new file mode 100644 index 000000000..1ad21c91a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -0,0 +1,46 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import "strconv" + +// ParseUint32s parses a slice of strings into a slice of uint32s. +func ParseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// ParseUint64s parses a slice of strings into a slice of uint64s. 
+func ParseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go new file mode 100644 index 000000000..e36d4a3bd --- /dev/null +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -0,0 +1,259 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" +) + +// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. +type IPVSStats struct { + // Total count of connections. + Connections uint64 + // Total incoming packages processed. + IncomingPackets uint64 + // Total outgoing packages processed. + OutgoingPackets uint64 + // Total incoming traffic. + IncomingBytes uint64 + // Total outgoing traffic. + OutgoingBytes uint64 +} + +// IPVSBackendStatus holds current metrics of one virtual / real address pair. +type IPVSBackendStatus struct { + // The local (virtual) IP address. + LocalAddress net.IP + // The remote (real) IP address. + RemoteAddress net.IP + // The local (virtual) port. + LocalPort uint16 + // The remote (real) port. + RemotePort uint16 + // The local firewall mark + LocalMark string + // The transport protocol (TCP, UDP). + Proto string + // The current number of active connections for this virtual/real address pair. + ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// NewIPVSStats reads the IPVS statistics. +func NewIPVSStats() (IPVSStats, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return IPVSStats{}, err + } + + return fs.NewIPVSStats() +} + +// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) NewIPVSStats() (IPVSStats, error) { + file, err := os.Open(fs.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + defer file.Close() + + return parseIPVSStats(file) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. 
+func parseIPVSStats(file io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(file) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return []IPVSBackendStatus{}, err + } + + return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localMark string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = "" + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + LocalMark: localMark, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + var ( + ip net.IP + err error + ) + + switch len(s) { + case 13: + ip, err = 
hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + } + + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + } + port, err := strconv.ParseUint(portString, 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 000000000..9dc19583d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,151 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" +) + +var ( + statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device consists of. + DisksTotal int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 +} + +// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. +func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { + mdStatusFilePath := fs.Path("mdstat") + content, err := ioutil.ReadFile(mdStatusFilePath) + if err != nil { + return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + mdStates := []MDStat{} + lines := strings.Split(string(content), "\n") + for i, l := range lines { + if l == "" { + continue + } + if l[0] == ' ' { + continue + } + if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + continue + } + + mainLine := strings.Split(l, " ") + if len(mainLine) < 3 { + return mdStates, fmt.Errorf("error parsing mdline: %s", l) + } + mdName := mainLine[0] + activityState := mainLine[2] + + if len(lines) <= i+3 { + return mdStates, fmt.Errorf( + "error parsing %s: too few lines for md device %s", + mdStatusFilePath, + mdName, + ) + } + + active, total, size, err := evalStatusline(lines[i+1]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + // j is the line number of the syncing-line. + j := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + j = i + 3 + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. 
+ syncedBlocks := size + if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { + syncedBlocks, err = evalBuildline(lines[j]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + } + + mdStates = append(mdStates, MDStat{ + Name: mdName, + ActivityState: activityState, + DisksActive: active, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + }) + } + + return mdStates, nil +} + +func evalStatusline(statusline string) (active, total, size int64, err error) { + matches := statuslineRE.FindStringSubmatch(statusline) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) + } + + size, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + return active, total, size, nil +} + +func evalBuildline(buildline string) (syncedBlocks int64, err error) { + matches := buildlineRE.FindStringSubmatch(buildline) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected buildline: %s", buildline) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + } + + return syncedBlocks, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go new file mode 100644 index 000000000..e95ddbc67 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -0,0 +1,569 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// While implementing parsing of /proc/[pid]/mountstats, this blog was used +// heavily as a reference: +// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex +// +// Special thanks to Chris Siebenmann for all of his posts explaining the +// various statistics available for NFS. + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// Constants shared between multiple functions. +const ( + deviceEntryLen = 8 + + fieldBytesLen = 8 + fieldEventsLen = 27 + + statVersion10 = "1.0" + statVersion11 = "1.1" + + fieldTransport10Len = 10 + fieldTransport11Len = 13 +) + +// A Mount is a device mount parsed from /proc/[pid]/mountstats. +type Mount struct { + // Name of the device. + Device string + // The mount point of the device. + Mount string + // The filesystem type used by the device. + Type string + // If available additional statistics related to this Mount. + // Use a type assertion to determine if additional statistics are available. 
+ Stats MountStats +} + +// A MountStats is a type which contains detailed statistics for a specific +// type of Mount. +type MountStats interface { + mountStats() +} + +// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. +type MountStatsNFS struct { + // The version of statistics provided. + StatVersion string + // The age of the NFS mount. + Age time.Duration + // Statistics related to byte counters for various operations. + Bytes NFSBytesStats + // Statistics related to various NFS event occurrences. + Events NFSEventsStats + // Statistics broken down by filesystem operation. + Operations []NFSOperationStats + // Statistics about the NFS RPC transport. + Transport NFSTransportStats +} + +// mountStats implements MountStats. +func (m MountStatsNFS) mountStats() {} + +// A NFSBytesStats contains statistics about the number of bytes read and written +// by an NFS client to and from an NFS server. +type NFSBytesStats struct { + // Number of bytes read using the read() syscall. + Read uint64 + // Number of bytes written using the write() syscall. + Write uint64 + // Number of bytes read using the read() syscall in O_DIRECT mode. + DirectRead uint64 + // Number of bytes written using the write() syscall in O_DIRECT mode. + DirectWrite uint64 + // Number of bytes read from the NFS server, in total. + ReadTotal uint64 + // Number of bytes written to the NFS server, in total. + WriteTotal uint64 + // Number of pages read directly via mmap()'d files. + ReadPages uint64 + // Number of pages written directly via mmap()'d files. + WritePages uint64 +} + +// A NFSEventsStats contains statistics about NFS event occurrences. +type NFSEventsStats struct { + // Number of times cached inode attributes are re-validated from the server. + InodeRevalidate uint64 + // Number of times cached dentry nodes are re-validated from the server. + DnodeRevalidate uint64 + // Number of times an inode cache is cleared. + DataInvalidate uint64 + // Number of times cached inode attributes are invalidated. + AttributeInvalidate uint64 + // Number of times files or directories have been open()'d. + VFSOpen uint64 + // Number of times a directory lookup has occurred. + VFSLookup uint64 + // Number of times permissions have been checked. + VFSAccess uint64 + // Number of updates (and potential writes) to pages. + VFSUpdatePage uint64 + // Number of pages read directly via mmap()'d files. + VFSReadPage uint64 + // Number of times a group of pages have been read. + VFSReadPages uint64 + // Number of pages written directly via mmap()'d files. + VFSWritePage uint64 + // Number of times a group of pages have been written. + VFSWritePages uint64 + // Number of times directory entries have been read with getdents(). + VFSGetdents uint64 + // Number of times attributes have been set on inodes. + VFSSetattr uint64 + // Number of pending writes that have been forcefully flushed to the server. + VFSFlush uint64 + // Number of times fsync() has been called on directories and files. + VFSFsync uint64 + // Number of times locking has been attempted on a file. + VFSLock uint64 + // Number of times files have been closed and released. + VFSFileRelease uint64 + // Unknown. Possibly unused. + CongestionWait uint64 + // Number of times files have been truncated. + Truncation uint64 + // Number of times a file has been grown due to writes beyond its existing end. + WriteExtension uint64 + // Number of times a file was removed while still open by another process. 
+ SillyRename uint64 + // Number of times the NFS server gave less data than expected while reading. + ShortRead uint64 + // Number of times the NFS server wrote less data than expected while writing. + ShortWrite uint64 + // Number of times the NFS server indicated EJUKEBOX; retrieving data from + // offline storage. + JukeboxDelay uint64 + // Number of NFS v4.1+ pNFS reads. + PNFSRead uint64 + // Number of NFS v4.1+ pNFS writes. + PNFSWrite uint64 +} + +// A NFSOperationStats contains statistics for a single operation. +type NFSOperationStats struct { + // The name of the operation. + Operation string + // Number of requests performed for this operation. + Requests uint64 + // Number of times an actual RPC request has been transmitted for this operation. + Transmissions uint64 + // Number of times a request has had a major timeout. + MajorTimeouts uint64 + // Number of bytes sent for this operation, including RPC headers and payload. + BytesSent uint64 + // Number of bytes received for this operation, including RPC headers and payload. + BytesReceived uint64 + // Duration all requests spent queued for transmission before they were sent. + CumulativeQueueTime time.Duration + // Duration it took to get a reply back after the request was transmitted. + CumulativeTotalResponseTime time.Duration + // Duration from when a request was enqueued to when it was completely handled. + CumulativeTotalRequestTime time.Duration +} + +// A NFSTransportStats contains statistics for the NFS mount RPC requests and +// responses. +type NFSTransportStats struct { + // The local port used for the NFS mount. + Port uint64 + // Number of times the client has had to establish a connection from scratch + // to the NFS server. + Bind uint64 + // Number of times the client has made a TCP connection to the NFS server. + Connect uint64 + // Duration (in jiffies, a kernel internal unit of time) the NFS mount has + // spent waiting for connections to the server to be established. + ConnectIdleTime uint64 + // Duration since the NFS mount last saw any RPC traffic. + IdleTime time.Duration + // Number of RPC requests for this mount sent to the NFS server. + Sends uint64 + // Number of RPC responses for this mount received from the NFS server. + Receives uint64 + // Number of times the NFS server sent a response with a transaction ID + // unknown to this client. + BadTransactionIDs uint64 + // A running counter, incremented on each request as the current difference + // ebetween sends and receives. + CumulativeActiveRequests uint64 + // A running counter, incremented on each request by the current backlog + // queue size. + CumulativeBacklog uint64 + + // Stats below only available with stat version 1.1. + + // Maximum number of simultaneously active RPC requests ever used. + MaximumRPCSlotsUsed uint64 + // A running counter, incremented on each request as the current size of the + // sending queue. + CumulativeSendingQueue uint64 + // A running counter, incremented on each request as the current size of the + // pending queue. + CumulativePendingQueue uint64 +} + +// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice +// of Mount structures containing detailed information about each mount. +// If available, statistics for each mount are parsed as well. 
+func parseMountStats(r io.Reader) ([]*Mount, error) { + const ( + device = "device" + statVersionPrefix = "statvers=" + + nfs3Type = "nfs" + nfs4Type = "nfs4" + ) + + var mounts []*Mount + + s := bufio.NewScanner(r) + for s.Scan() { + // Only look for device entries in this function + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 || ss[0] != device { + continue + } + + m, err := parseMount(ss) + if err != nil { + return nil, err + } + + // Does this mount also possess statistics information? + if len(ss) > deviceEntryLen { + // Only NFSv3 and v4 are supported for parsing statistics + if m.Type != nfs3Type && m.Type != nfs4Type { + return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + } + + statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + + stats, err := parseMountStatsNFS(s, statVersion) + if err != nil { + return nil, err + } + + m.Stats = stats + } + + mounts = append(mounts, m) + } + + return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +// device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { + if len(ss) < deviceEntryLen { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + + // Check for specific words appearing at specific indices to ensure + // the format is consistent with what we expect + format := []struct { + i int + s string + }{ + {i: 0, s: "device"}, + {i: 2, s: "mounted"}, + {i: 3, s: "on"}, + {i: 5, s: "with"}, + {i: 6, s: "fstype"}, + } + + for _, f := range format { + if ss[f.i] != f.s { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + } + + return &Mount{ + Device: ss[1], + Mount: ss[4], + Type: ss[7], + }, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. 
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { + // Field indicators for parsing specific types of data + const ( + fieldAge = "age:" + fieldBytes = "bytes:" + fieldEvents = "events:" + fieldPerOpStats = "per-op" + fieldTransport = "xprt:" + ) + + stats := &MountStatsNFS{ + StatVersion: statVersion, + } + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + break + } + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } + + switch ss[0] { + case fieldAge: + // Age integer is in seconds + d, err := time.ParseDuration(ss[1] + "s") + if err != nil { + return nil, err + } + + stats.Age = d + case fieldBytes: + bstats, err := parseNFSBytesStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Bytes = *bstats + case fieldEvents: + estats, err := parseNFSEventsStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Events = *estats + case fieldTransport: + if len(ss) < 3 { + return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + } + + tstats, err := parseNFSTransportStats(ss[2:], statVersion) + if err != nil { + return nil, err + } + + stats.Transport = *tstats + } + + // When encountering "per-operation statistics", we must break this + // loop and parse them separately to ensure we can terminate parsing + // before reaching another device entry; hence why this 'if' statement + // is not just another switch case + if ss[0] == fieldPerOpStats { + break + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + // NFS per-operation stats appear last before the next device entry + perOpStats, err := parseNFSOperationStats(s) + if err != nil { + return nil, err + } + + stats.Operations = perOpStats + + return stats, nil +} + +// parseNFSBytesStats parses a NFSBytesStats line using an input set of +// integer fields. +func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { + if len(ss) != fieldBytesLen { + return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + } + + ns := make([]uint64, 0, fieldBytesLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSBytesStats{ + Read: ns[0], + Write: ns[1], + DirectRead: ns[2], + DirectWrite: ns[3], + ReadTotal: ns[4], + WriteTotal: ns[5], + ReadPages: ns[6], + WritePages: ns[7], + }, nil +} + +// parseNFSEventsStats parses a NFSEventsStats line using an input set of +// integer fields. 
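+// The input must contain exactly fieldEventsLen integer fields.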
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { + if len(ss) != fieldEventsLen { + return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + } + + ns := make([]uint64, 0, fieldEventsLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSEventsStats{ + InodeRevalidate: ns[0], + DnodeRevalidate: ns[1], + DataInvalidate: ns[2], + AttributeInvalidate: ns[3], + VFSOpen: ns[4], + VFSLookup: ns[5], + VFSAccess: ns[6], + VFSUpdatePage: ns[7], + VFSReadPage: ns[8], + VFSReadPages: ns[9], + VFSWritePage: ns[10], + VFSWritePages: ns[11], + VFSGetdents: ns[12], + VFSSetattr: ns[13], + VFSFlush: ns[14], + VFSFsync: ns[15], + VFSLock: ns[16], + VFSFileRelease: ns[17], + CongestionWait: ns[18], + Truncation: ns[19], + WriteExtension: ns[20], + SillyRename: ns[21], + ShortRead: ns[22], + ShortWrite: ns[23], + JukeboxDelay: ns[24], + PNFSRead: ns[25], + PNFSWrite: ns[26], + }, nil +} + +// parseNFSOperationStats parses a slice of NFSOperationStats by scanning +// additional information about per-operation statistics until an empty +// line is reached. +func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { + const ( + // Number of expected fields in each per-operation statistics set + numFields = 9 + ) + + var ops []NFSOperationStats + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + // Must break when reading a blank line after per-operation stats to + // enable top-level function to parse the next device entry + break + } + + if len(ss) != numFields { + return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + } + + // Skip string operation name for integers + ns := make([]uint64, 0, numFields-1) + for _, st := range ss[1:] { + n, err := strconv.ParseUint(st, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + ops = append(ops, NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, + CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, + CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, + }) + } + + return ops, s.Err() +} + +// parseNFSTransportStats parses a NFSTransportStats line using an input set of +// integer fields matched to a specific stats version. +func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + switch statVersion { + case statVersion10: + if len(ss) != fieldTransport10Len { + return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + } + case statVersion11: + if len(ss) != fieldTransport11Len { + return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + } + default: + return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + } + + // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay + // in a v1.0 response. + // + // Note: slice length must be set to length of v1.1 stats to avoid a panic when + // only v1.0 stats are present. + // See: https://github.com/prometheus/node_exporter/issues/571. 
+ ns := make([]uint64, fieldTransport11Len) + for i, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns[i] = n + } + + return &NFSTransportStats{ + Port: ns[0], + Bind: ns[1], + Connect: ns[2], + ConnectIdleTime: ns[3], + IdleTime: time.Duration(ns[4]) * time.Second, + Sends: ns[5], + Receives: ns[6], + BadTransactionIDs: ns[7], + CumulativeActiveRequests: ns[8], + CumulativeBacklog: ns[9], + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go new file mode 100644 index 000000000..3f2523371 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -0,0 +1,216 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "sort" + "strconv" + "strings" +) + +// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. +type NetDevLine struct { + Name string `json:"name"` // The name of the interface. + RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. + RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. + RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. + RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. + RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. + RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. + RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. + RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. + TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. + TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. + TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. + TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. + TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. + TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. + TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. + TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. +} + +// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys +// are interface names. +type NetDev map[string]NetDevLine + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. 
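+// It uses the procfs tree at DefaultMountPoint and delegates to FS.NewNetDev.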
+func NewNetDev() (NetDev, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewNetDev() +} + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NewNetDev() (NetDev, error) { + return newNetDev(fs.Path("net/dev")) +} + +// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NewNetDev() (NetDev, error) { + return newNetDev(p.path("net/dev")) +} + +// newNetDev creates a new NetDev from the contents of the given file. +func newNetDev(file string) (NetDev, error) { + f, err := os.Open(file) + if err != nil { + return NetDev{}, err + } + defer f.Close() + + nd := NetDev{} + s := bufio.NewScanner(f) + for n := 0; s.Scan(); n++ { + // Skip the 2 header lines. + if n < 2 { + continue + } + + line, err := nd.parseLine(s.Text()) + if err != nil { + return nd, err + } + + nd[line.Name] = *line + } + + return nd, s.Err() +} + +// parseLine parses a single line from the /proc/net/dev file. Header lines +// must be filtered prior to calling this method. +func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { + parts := strings.SplitN(rawLine, ":", 2) + if len(parts) != 2 { + return nil, errors.New("invalid net/dev line, missing colon") + } + fields := strings.Fields(strings.TrimSpace(parts[1])) + + var err error + line := &NetDevLine{} + + // Interface Name + line.Name = strings.TrimSpace(parts[0]) + if line.Name == "" { + return nil, errors.New("invalid net/dev line, empty interface name") + } + + // RX + line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) + if err != nil { + return nil, err + } + line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) + if err != nil { + return nil, err + } + + // TX + line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) + if err != nil { + return nil, err + } + line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) + if err != nil { + return nil, err + } + line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) + if err != nil { + return nil, err + } + line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return nil, err + } + line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return nil, err + } + line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) + if err != nil { + return nil, err + } + line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) + if err != nil { + return nil, err + } + line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) + if err != nil { + return nil, err + } + + return line, nil +} + +// Total aggregates the values across interfaces and returns a new NetDevLine. +// The Name field will be a sorted comma separated list of interface names. 
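+// For example, a NetDev with entries for "eth0" and "lo" produces Name "eth0, lo"
+// with each counter summed over both interfaces.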
+func (nd NetDev) Total() NetDevLine {
+ total := NetDevLine{}
+
+ names := make([]string, 0, len(nd))
+ for _, ifc := range nd {
+ names = append(names, ifc.Name)
+ total.RxBytes += ifc.RxBytes
+ total.RxPackets += ifc.RxPackets
+ total.RxErrors += ifc.RxErrors
+ total.RxDropped += ifc.RxDropped
+ total.RxFIFO += ifc.RxFIFO
+ total.RxFrame += ifc.RxFrame
+ total.RxCompressed += ifc.RxCompressed
+ total.RxMulticast += ifc.RxMulticast
+ total.TxBytes += ifc.TxBytes
+ total.TxPackets += ifc.TxPackets
+ total.TxErrors += ifc.TxErrors
+ total.TxDropped += ifc.TxDropped
+ total.TxFIFO += ifc.TxFIFO
+ total.TxCollisions += ifc.TxCollisions
+ total.TxCarrier += ifc.TxCarrier
+ total.TxCompressed += ifc.TxCompressed
+ }
+ sort.Strings(names)
+ total.Name = strings.Join(names, ", ")
+
+ return total
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go
new file mode 100644
index 000000000..651bf6819
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go
@@ -0,0 +1,263 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package nfs implements parsing of /proc/net/rpc/nfsd.
+// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
+package nfs
+
+// ReplyCache models the "rc" line.
+type ReplyCache struct {
+ Hits uint64
+ Misses uint64
+ NoCache uint64
+}
+
+// FileHandles models the "fh" line.
+type FileHandles struct {
+ Stale uint64
+ TotalLookups uint64
+ AnonLookups uint64
+ DirNoCache uint64
+ NoDirNoCache uint64
+}
+
+// InputOutput models the "io" line.
+type InputOutput struct {
+ Read uint64
+ Write uint64
+}
+
+// Threads models the "th" line.
+type Threads struct {
+ Threads uint64
+ FullCnt uint64
+}
+
+// ReadAheadCache models the "ra" line.
+type ReadAheadCache struct {
+ CacheSize uint64
+ CacheHistogram []uint64
+ NotFound uint64
+}
+
+// Network models the "net" line.
+type Network struct {
+ NetCount uint64
+ UDPCount uint64
+ TCPCount uint64
+ TCPConnect uint64
+}
+
+// ClientRPC models the nfs "rpc" line.
+type ClientRPC struct {
+ RPCCount uint64
+ Retransmissions uint64
+ AuthRefreshes uint64
+}
+
+// ServerRPC models the nfsd "rpc" line.
+type ServerRPC struct {
+ RPCCount uint64
+ BadCnt uint64
+ BadFmt uint64
+ BadAuth uint64
+ BadcInt uint64
+}
+
+// V2Stats models the "proc2" line.
+type V2Stats struct {
+ Null uint64
+ GetAttr uint64
+ SetAttr uint64
+ Root uint64
+ Lookup uint64
+ ReadLink uint64
+ Read uint64
+ WrCache uint64
+ Write uint64
+ Create uint64
+ Remove uint64
+ Rename uint64
+ Link uint64
+ SymLink uint64
+ MkDir uint64
+ RmDir uint64
+ ReadDir uint64
+ FsStat uint64
+}
+
+// V3Stats models the "proc3" line.
+type V3Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Lookup uint64 + Access uint64 + ReadLink uint64 + Read uint64 + Write uint64 + Create uint64 + MkDir uint64 + SymLink uint64 + MkNod uint64 + Remove uint64 + RmDir uint64 + Rename uint64 + Link uint64 + ReadDir uint64 + ReadDirPlus uint64 + FsStat uint64 + FsInfo uint64 + PathConf uint64 + Commit uint64 +} + +// ClientV4Stats models the nfs "proc4" line. +type ClientV4Stats struct { + Null uint64 + Read uint64 + Write uint64 + Commit uint64 + Open uint64 + OpenConfirm uint64 + OpenNoattr uint64 + OpenDowngrade uint64 + Close uint64 + Setattr uint64 + FsInfo uint64 + Renew uint64 + SetClientID uint64 + SetClientIDConfirm uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Access uint64 + Getattr uint64 + Lookup uint64 + LookupRoot uint64 + Remove uint64 + Rename uint64 + Link uint64 + Symlink uint64 + Create uint64 + Pathconf uint64 + StatFs uint64 + ReadLink uint64 + ReadDir uint64 + ServerCaps uint64 + DelegReturn uint64 + GetACL uint64 + SetACL uint64 + FsLocations uint64 + ReleaseLockowner uint64 + Secinfo uint64 + FsidPresent uint64 + ExchangeID uint64 + CreateSession uint64 + DestroySession uint64 + Sequence uint64 + GetLeaseTime uint64 + ReclaimComplete uint64 + LayoutGet uint64 + GetDeviceInfo uint64 + LayoutCommit uint64 + LayoutReturn uint64 + SecinfoNoName uint64 + TestStateID uint64 + FreeStateID uint64 + GetDeviceList uint64 + BindConnToSession uint64 + DestroyClientID uint64 + Seek uint64 + Allocate uint64 + DeAllocate uint64 + LayoutStats uint64 + Clone uint64 +} + +// ServerV4Stats models the nfsd "proc4" line. +type ServerV4Stats struct { + Null uint64 + Compound uint64 +} + +// V4Ops models the "proc4ops" line: NFSv4 operations +// Variable list, see: +// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) +// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) +// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) +type V4Ops struct { + //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? + Op0Unused uint64 + Op1Unused uint64 + Op2Future uint64 + Access uint64 + Close uint64 + Commit uint64 + Create uint64 + DelegPurge uint64 + DelegReturn uint64 + GetAttr uint64 + GetFH uint64 + Link uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Lookup uint64 + LookupRoot uint64 + Nverify uint64 + Open uint64 + OpenAttr uint64 + OpenConfirm uint64 + OpenDgrd uint64 + PutFH uint64 + PutPubFH uint64 + PutRootFH uint64 + Read uint64 + ReadDir uint64 + ReadLink uint64 + Remove uint64 + Rename uint64 + Renew uint64 + RestoreFH uint64 + SaveFH uint64 + SecInfo uint64 + SetAttr uint64 + Verify uint64 + Write uint64 + RelLockOwner uint64 +} + +// ClientRPCStats models all stats from /proc/net/rpc/nfs. +type ClientRPCStats struct { + Network Network + ClientRPC ClientRPC + V2Stats V2Stats + V3Stats V3Stats + ClientV4Stats ClientV4Stats +} + +// ServerRPCStats models all stats from /proc/net/rpc/nfsd. 
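+// It is populated by ParseServerRPCStats.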
+type ServerRPCStats struct { + ReplyCache ReplyCache + FileHandles FileHandles + InputOutput InputOutput + Threads Threads + ReadAheadCache ReadAheadCache + Network Network + ServerRPC ServerRPC + V2Stats V2Stats + V3Stats V3Stats + ServerV4Stats ServerV4Stats + V4Ops V4Ops +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go new file mode 100644 index 000000000..95a83cc5b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nfs + +import ( + "fmt" +) + +func parseReplyCache(v []uint64) (ReplyCache, error) { + if len(v) != 3 { + return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) + } + + return ReplyCache{ + Hits: v[0], + Misses: v[1], + NoCache: v[2], + }, nil +} + +func parseFileHandles(v []uint64) (FileHandles, error) { + if len(v) != 5 { + return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) + } + + return FileHandles{ + Stale: v[0], + TotalLookups: v[1], + AnonLookups: v[2], + DirNoCache: v[3], + NoDirNoCache: v[4], + }, nil +} + +func parseInputOutput(v []uint64) (InputOutput, error) { + if len(v) != 2 { + return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) + } + + return InputOutput{ + Read: v[0], + Write: v[1], + }, nil +} + +func parseThreads(v []uint64) (Threads, error) { + if len(v) != 2 { + return Threads{}, fmt.Errorf("invalid Threads line %q", v) + } + + return Threads{ + Threads: v[0], + FullCnt: v[1], + }, nil +} + +func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { + if len(v) != 12 { + return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) + } + + return ReadAheadCache{ + CacheSize: v[0], + CacheHistogram: v[1:11], + NotFound: v[11], + }, nil +} + +func parseNetwork(v []uint64) (Network, error) { + if len(v) != 4 { + return Network{}, fmt.Errorf("invalid Network line %q", v) + } + + return Network{ + NetCount: v[0], + UDPCount: v[1], + TCPCount: v[2], + TCPConnect: v[3], + }, nil +} + +func parseServerRPC(v []uint64) (ServerRPC, error) { + if len(v) != 5 { + return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ServerRPC{ + RPCCount: v[0], + BadCnt: v[1], + BadFmt: v[2], + BadAuth: v[3], + BadcInt: v[4], + }, nil +} + +func parseClientRPC(v []uint64) (ClientRPC, error) { + if len(v) != 3 { + return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ClientRPC{ + RPCCount: v[0], + Retransmissions: v[1], + AuthRefreshes: v[2], + }, nil +} + +func parseV2Stats(v []uint64) (V2Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 18 { + return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) + } + + return V2Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Root: v[4], + Lookup: v[5], + ReadLink: v[6], + Read: v[7], + WrCache: v[8], + Write: v[9], + Create: v[10], + Remove: v[11], + Rename: v[12], + Link: v[13], + SymLink: v[14], + MkDir: 
v[15], + RmDir: v[16], + ReadDir: v[17], + FsStat: v[18], + }, nil +} + +func parseV3Stats(v []uint64) (V3Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 22 { + return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) + } + + return V3Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Lookup: v[4], + Access: v[5], + ReadLink: v[6], + Read: v[7], + Write: v[8], + Create: v[9], + MkDir: v[10], + SymLink: v[11], + MkNod: v[12], + Remove: v[13], + RmDir: v[14], + Rename: v[15], + Link: v[16], + ReadDir: v[17], + ReadDirPlus: v[18], + FsStat: v[19], + FsInfo: v[20], + PathConf: v[21], + Commit: v[22], + }, nil +} + +func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values { + return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) + } + + // This function currently supports mapping 59 NFS v4 client stats. Older + // kernels may emit fewer stats, so we must detect this and pad out the + // values to match the expected slice size. + if values < 59 { + newValues := make([]uint64, 60) + copy(newValues, v) + v = newValues + } + + return ClientV4Stats{ + Null: v[1], + Read: v[2], + Write: v[3], + Commit: v[4], + Open: v[5], + OpenConfirm: v[6], + OpenNoattr: v[7], + OpenDowngrade: v[8], + Close: v[9], + Setattr: v[10], + FsInfo: v[11], + Renew: v[12], + SetClientID: v[13], + SetClientIDConfirm: v[14], + Lock: v[15], + Lockt: v[16], + Locku: v[17], + Access: v[18], + Getattr: v[19], + Lookup: v[20], + LookupRoot: v[21], + Remove: v[22], + Rename: v[23], + Link: v[24], + Symlink: v[25], + Create: v[26], + Pathconf: v[27], + StatFs: v[28], + ReadLink: v[29], + ReadDir: v[30], + ServerCaps: v[31], + DelegReturn: v[32], + GetACL: v[33], + SetACL: v[34], + FsLocations: v[35], + ReleaseLockowner: v[36], + Secinfo: v[37], + FsidPresent: v[38], + ExchangeID: v[39], + CreateSession: v[40], + DestroySession: v[41], + Sequence: v[42], + GetLeaseTime: v[43], + ReclaimComplete: v[44], + LayoutGet: v[45], + GetDeviceInfo: v[46], + LayoutCommit: v[47], + LayoutReturn: v[48], + SecinfoNoName: v[49], + TestStateID: v[50], + FreeStateID: v[51], + GetDeviceList: v[52], + BindConnToSession: v[53], + DestroyClientID: v[54], + Seek: v[55], + Allocate: v[56], + DeAllocate: v[57], + LayoutStats: v[58], + Clone: v[59], + }, nil +} + +func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 2 { + return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + } + + return ServerV4Stats{ + Null: v[1], + Compound: v[2], + }, nil +} + +func parseV4Ops(v []uint64) (V4Ops, error) { + values := int(v[0]) + if len(v[1:]) != values || values < 39 { + return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) + } + + stats := V4Ops{ + Op0Unused: v[1], + Op1Unused: v[2], + Op2Future: v[3], + Access: v[4], + Close: v[5], + Commit: v[6], + Create: v[7], + DelegPurge: v[8], + DelegReturn: v[9], + GetAttr: v[10], + GetFH: v[11], + Link: v[12], + Lock: v[13], + Lockt: v[14], + Locku: v[15], + Lookup: v[16], + LookupRoot: v[17], + Nverify: v[18], + Open: v[19], + OpenAttr: v[20], + OpenConfirm: v[21], + OpenDgrd: v[22], + PutFH: v[23], + PutPubFH: v[24], + PutRootFH: v[25], + Read: v[26], + ReadDir: v[27], + ReadLink: v[28], + Remove: v[29], + Rename: v[30], + Renew: v[31], + RestoreFH: v[32], + SaveFH: v[33], + SecInfo: v[34], + SetAttr: v[35], + Verify: v[36], + Write: v[37], + RelLockOwner: v[38], + } + + return stats, nil +} diff --git 
a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go new file mode 100644 index 000000000..c0d3a5ad9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs +func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { + stats := &ClientRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFS metric line %q", line) + } + + values, err := util.ParseUint64s(parts[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing NFS metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ClientRPC, err = parseClientRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ClientV4Stats, err = parseClientV4Stats(values) + default: + return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFS file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go new file mode 100644 index 000000000..57bb4a358 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go @@ -0,0 +1,89 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd +func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { + stats := &ServerRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFSd metric line %q", line) + } + label := parts[0] + + var values []uint64 + var err error + if label == "th" { + if len(parts) < 3 { + return nil, fmt.Errorf("invalid NFSd th metric line %q", line) + } + values, err = util.ParseUint64s(parts[1:3]) + } else { + values, err = util.ParseUint64s(parts[1:]) + } + if err != nil { + return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "rc": + stats.ReplyCache, err = parseReplyCache(values) + case "fh": + stats.FileHandles, err = parseFileHandles(values) + case "io": + stats.InputOutput, err = parseInputOutput(values) + case "th": + stats.Threads, err = parseThreads(values) + case "ra": + stats.ReadAheadCache, err = parseReadAheadCache(values) + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ServerRPC, err = parseServerRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ServerV4Stats, err = parseServerV4Stats(values) + case "proc4ops": + stats.V4Ops, err = parseV4Ops(values) + default: + return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFSd file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go new file mode 100644 index 000000000..7cf5b8acf --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -0,0 +1,238 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. 
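+// It uses the procfs tree at DefaultMountPoint; use FS.NewProc to read from a
+// different mount point.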
+func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// NewProc returns a process for the given pid. +func (fs FS) NewProc(pid int) (Proc, error) { + if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently available processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := os.Open(p.path("cmdline")) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + f, err := os.Open(p.path("comm")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// FileDescriptors returns the currently open file descriptors of a process. +func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. 
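+// The count is taken from the entries of /proc/[pid]/fd.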
+func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +// MountStats retrieves statistics and configuration for mount points in a +// process's namespace. +func (p Proc) MountStats() ([]*Mount, error) { + f, err := os.Open(p.path("mountstats")) + if err != nil { + return nil, err + } + defer f.Close() + + return parseMountStats(f) +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := os.Open(p.path("fd")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + return names, nil +} + +func (p Proc) path(pa ...string) string { + return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) +} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go new file mode 100644 index 000000000..0251c83bf --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -0,0 +1,65 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "os" +) + +// ProcIO models the content of /proc//io. +type ProcIO struct { + // Chars read. + RChar uint64 + // Chars written. + WChar uint64 + // Read syscalls. + SyscR uint64 + // Write syscalls. + SyscW uint64 + // Bytes read. + ReadBytes uint64 + // Bytes written. + WriteBytes uint64 + // Bytes written, but taking into account truncation. See + // Documentation/filesystems/proc.txt in the kernel sources for + // detailed explanation. + CancelledWriteBytes int64 +} + +// NewIO creates a new ProcIO instance from a given Proc instance. +func (p Proc) NewIO() (ProcIO, error) { + pio := ProcIO{} + + f, err := os.Open(p.path("io")) + if err != nil { + return pio, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return pio, err + } + + ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + + "read_bytes: %d\nwrite_bytes: %d\n" + + "cancelled_write_bytes: %d\n" + + _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, + &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + + return pio, err +} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go new file mode 100644 index 000000000..f04ba6fda --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -0,0 +1,150 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. For more information see getrlimit(2): +// http://man7.org/linux/man-pages/man2/getrlimit.2.html. +type ProcLimits struct { + // CPU time limit in seconds. + CPUTime int64 + // Maximum size of files that the process may create. + FileSize int64 + // Maximum size of the process's data segment (initialized data, + // uninitialized data, and heap). + DataSize int64 + // Maximum size of the process stack in bytes. + StackSize int64 + // Maximum size of a core file. + CoreFileSize int64 + // Limit of the process's resident set in pages. + ResidentSet int64 + // Maximum number of processes that can be created for the real user ID of + // the calling process. + Processes int64 + // Value one greater than the maximum file descriptor number that can be + // opened by this process. + OpenFiles int64 + // Maximum number of bytes of memory that may be locked into RAM. + LockedMemory int64 + // Maximum size of the process's virtual memory address space in bytes. + AddressSpace int64 + // Limit on the combined number of flock(2) locks and fcntl(2) leases that + // this process may establish. + FileLocks int64 + // Limit of signals that may be queued for the real user ID of the calling + // process. + PendingSignals int64 + // Limit on the number of bytes that can be allocated for POSIX message + // queues for the real user ID of the calling process. + MsqqueueSize int64 + // Limit of the nice priority set using setpriority(2) or nice(2). + NicePriority int64 + // Limit of the real-time priority set using sched_setscheduler(2) or + // sched_setparam(2). + RealtimePriority int64 + // Limit (in microseconds) on the amount of CPU time that a process + // scheduled under a real-time scheduling policy may consume without making + // a blocking system call. + RealtimeTimeout int64 +} + +const ( + limitsFields = 3 + limitsUnlimited = "unlimited" +) + +var ( + limitsDelimiter = regexp.MustCompile(" +") +) + +// NewLimits returns the current soft limits of the process. 
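+// Limits the kernel reports as "unlimited" are returned as -1.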
+func (p Proc) NewLimits() (ProcLimits, error) { + f, err := os.Open(p.path("limits")) + if err != nil { + return ProcLimits{}, err + } + defer f.Close() + + var ( + l = ProcLimits{} + s = bufio.NewScanner(f) + ) + for s.Scan() { + fields := limitsDelimiter.Split(s.Text(), limitsFields) + if len(fields) != limitsFields { + return ProcLimits{}, fmt.Errorf( + "couldn't parse %s line %s", f.Name(), s.Text()) + } + + switch fields[0] { + case "Max cpu time": + l.CPUTime, err = parseInt(fields[1]) + case "Max file size": + l.FileSize, err = parseInt(fields[1]) + case "Max data size": + l.DataSize, err = parseInt(fields[1]) + case "Max stack size": + l.StackSize, err = parseInt(fields[1]) + case "Max core file size": + l.CoreFileSize, err = parseInt(fields[1]) + case "Max resident set": + l.ResidentSet, err = parseInt(fields[1]) + case "Max processes": + l.Processes, err = parseInt(fields[1]) + case "Max open files": + l.OpenFiles, err = parseInt(fields[1]) + case "Max locked memory": + l.LockedMemory, err = parseInt(fields[1]) + case "Max address space": + l.AddressSpace, err = parseInt(fields[1]) + case "Max file locks": + l.FileLocks, err = parseInt(fields[1]) + case "Max pending signals": + l.PendingSignals, err = parseInt(fields[1]) + case "Max msgqueue size": + l.MsqqueueSize, err = parseInt(fields[1]) + case "Max nice priority": + l.NicePriority, err = parseInt(fields[1]) + case "Max realtime priority": + l.RealtimePriority, err = parseInt(fields[1]) + case "Max realtime timeout": + l.RealtimeTimeout, err = parseInt(fields[1]) + } + if err != nil { + return ProcLimits{}, err + } + } + + return l, s.Err() +} + +func parseInt(s string) (int64, error) { + if s == limitsUnlimited { + return -1, nil + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) + } + return i, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go new file mode 100644 index 000000000..d06c26eba --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// Namespace represents a single namespace of a process. +type Namespace struct { + Type string // Namespace type. + Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. +} + +// Namespaces contains all of the namespaces that the process is contained in. +type Namespaces map[string]Namespace + +// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the +// process is a member. 
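+// The returned map is keyed by the entry names found under /proc/[pid]/ns.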
+func (p Proc) NewNamespaces() (Namespaces, error) { + d, err := os.Open(p.path("ns")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) + } + + ns := make(Namespaces, len(names)) + for _, name := range names { + target, err := os.Readlink(p.path("ns", name)) + if err != nil { + return nil, err + } + + fields := strings.SplitN(target, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) + } + + typ := fields[0] + inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) + } + + ns[name] = Namespace{typ, uint32(inode)} + } + + return ns, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 000000000..3cf2a9f18 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,188 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. +// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. 
+ MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize int + // Resident set size in pages. + RSS int + + fs FS +} + +// NewStat returns the current status information of the process. +func (p Proc) NewStat() (ProcStat, error) { + f, err := os.Open(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, fs: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() int { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. +func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. +func (s ProcStat) StartTime() (float64, error) { + stat, err := s.fs.NewStat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil +} + +// CPUTime returns the total CPU user and system time in seconds. +func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 000000000..61eb6b0e3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -0,0 +1,232 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// CPUStat shows how much time the cpu spend in various stages. +type CPUStat struct { + User float64 + Nice float64 + System float64 + Idle float64 + Iowait float64 + IRQ float64 + SoftIRQ float64 + Steal float64 + Guest float64 + GuestNice float64 +} + +// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. +// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html +// It is possible to get per-cpu stats by reading /proc/softirqs +type SoftIRQStat struct { + Hi uint64 + Timer uint64 + NetTx uint64 + NetRx uint64 + Block uint64 + BlockIoPoll uint64 + Tasklet uint64 + Sched uint64 + Hrtimer uint64 + Rcu uint64 +} + +// Stat represents kernel/system statistics. +type Stat struct { + // Boot time in seconds since the Epoch. + BootTime uint64 + // Summed up cpu statistics. + CPUTotal CPUStat + // Per-CPU statistics. + CPU []CPUStat + // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. + IRQTotal uint64 + // Number of times a numbered IRQ was triggered. + IRQ []uint64 + // Number of times a context switch happened. + ContextSwitches uint64 + // Number of times a process was created. + ProcessCreated uint64 + // Number of processes currently running. + ProcessesRunning uint64 + // Number of processes currently blocked (waiting for IO). + ProcessesBlocked uint64 + // Number of times a softirq was scheduled. + SoftIRQTotal uint64 + // Detailed softirq statistics. + SoftIRQ SoftIRQStat +} + +// NewStat returns kernel/system statistics read from /proc/stat. +func NewStat() (Stat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Stat{}, err + } + + return fs.NewStat() +} + +// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). +func parseCPUStat(line string) (CPUStat, int64, error) { + cpuStat := CPUStat{} + var cpu string + + count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", + &cpu, + &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, + &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, + &cpuStat.Guest, &cpuStat.GuestNice) + + if err != nil && err != io.EOF { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) + } + if count == 0 { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) + } + + cpuStat.User /= userHZ + cpuStat.Nice /= userHZ + cpuStat.System /= userHZ + cpuStat.Idle /= userHZ + cpuStat.Iowait /= userHZ + cpuStat.IRQ /= userHZ + cpuStat.SoftIRQ /= userHZ + cpuStat.Steal /= userHZ + cpuStat.Guest /= userHZ + cpuStat.GuestNice /= userHZ + + if cpu == "cpu" { + return cpuStat, -1, nil + } + + cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) + if err != nil { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) + } + + return cpuStat, cpuID, nil +} + +// Parse a softirq line. 
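+// The first numeric field is the total softirq count; the following ten fields
+// are the per-type counters.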
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { + softIRQStat := SoftIRQStat{} + var total uint64 + var prefix string + + _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", + &prefix, &total, + &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, + &softIRQStat.Block, &softIRQStat.BlockIoPoll, + &softIRQStat.Tasklet, &softIRQStat.Sched, + &softIRQStat.Hrtimer, &softIRQStat.Rcu) + + if err != nil { + return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) + } + + return softIRQStat, total, nil +} + +// NewStat returns an information about current kernel/system statistics. +func (fs FS) NewStat() (Stat, error) { + // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt + + f, err := os.Open(fs.Path("stat")) + if err != nil { + return Stat{}, err + } + defer f.Close() + + stat := Stat{} + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "btime": + if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) + } + case parts[0] == "intr": + if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) + } + numberedIRQs := parts[2:] + stat.IRQ = make([]uint64, len(numberedIRQs)) + for i, count := range numberedIRQs { + if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) + } + } + case parts[0] == "ctxt": + if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) + } + case parts[0] == "processes": + if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) + } + case parts[0] == "procs_running": + if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) + } + case parts[0] == "procs_blocked": + if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) + } + case parts[0] == "softirq": + softIRQStats, total, err := parseSoftIRQStat(line) + if err != nil { + return Stat{}, err + } + stat.SoftIRQTotal = total + stat.SoftIRQ = softIRQStats + case strings.HasPrefix(parts[0], "cpu"): + cpuStat, cpuID, err := parseCPUStat(line) + if err != nil { + return Stat{}, err + } + if cpuID == -1 { + stat.CPUTotal = cpuStat + } else { + for int64(len(stat.CPU)) <= cpuID { + stat.CPU = append(stat.CPU, CPUStat{}) + } + stat.CPU[cpuID] = cpuStat + } + } + } + + if err := scanner.Err(); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) + } + + return stat, nil +} diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go new file mode 100644 index 000000000..ffe9df50d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfrm.go @@ -0,0 +1,187 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// XfrmStat models the contents of /proc/net/xfrm_stat. +type XfrmStat struct { + // All errors which are not matched by other + XfrmInError int + // No buffer is left + XfrmInBufferError int + // Header Error + XfrmInHdrError int + // No state found + // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong + XfrmInNoStates int + // Transformation protocol specific error + // e.g. SA Key is wrong + XfrmInStateProtoError int + // Transformation mode specific error + XfrmInStateModeError int + // Sequence error + // e.g. sequence number is out of window + XfrmInStateSeqError int + // State is expired + XfrmInStateExpired int + // State has mismatch option + // e.g. UDP encapsulation type is mismatched + XfrmInStateMismatch int + // State is invalid + XfrmInStateInvalid int + // No matching template for states + // e.g. Inbound SAs are correct but SP rule is wrong + XfrmInTmplMismatch int + // No policy is found for states + // e.g. Inbound SAs are correct but no SP is found + XfrmInNoPols int + // Policy discards + XfrmInPolBlock int + // Policy error + XfrmInPolError int + // All errors which are not matched by others + XfrmOutError int + // Bundle generation error + XfrmOutBundleGenError int + // Bundle check error + XfrmOutBundleCheckError int + // No state was found + XfrmOutNoStates int + // Transformation protocol specific error + XfrmOutStateProtoError int + // Transportation mode specific error + XfrmOutStateModeError int + // Sequence error + // i.e sequence number overflow + XfrmOutStateSeqError int + // State is expired + XfrmOutStateExpired int + // Policy discads + XfrmOutPolBlock int + // Policy is dead + XfrmOutPolDead int + // Policy Error + XfrmOutPolError int + XfrmFwdHdrError int + XfrmOutStateInvalid int + XfrmAcquireError int +} + +// NewXfrmStat reads the xfrm_stat statistics. +func NewXfrmStat() (XfrmStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return XfrmStat{}, err + } + + return fs.NewXfrmStat() +} + +// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
+func (fs FS) NewXfrmStat() (XfrmStat, error) { + file, err := os.Open(fs.Path("net/xfrm_stat")) + if err != nil { + return XfrmStat{}, err + } + defer file.Close() + + var ( + x = XfrmStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return XfrmStat{}, fmt.Errorf( + "couldnt parse %s line %s", file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return XfrmStat{}, err + } + + switch name { + case "XfrmInError": + x.XfrmInError = value + case "XfrmInBufferError": + x.XfrmInBufferError = value + case "XfrmInHdrError": + x.XfrmInHdrError = value + case "XfrmInNoStates": + x.XfrmInNoStates = value + case "XfrmInStateProtoError": + x.XfrmInStateProtoError = value + case "XfrmInStateModeError": + x.XfrmInStateModeError = value + case "XfrmInStateSeqError": + x.XfrmInStateSeqError = value + case "XfrmInStateExpired": + x.XfrmInStateExpired = value + case "XfrmInStateInvalid": + x.XfrmInStateInvalid = value + case "XfrmInTmplMismatch": + x.XfrmInTmplMismatch = value + case "XfrmInNoPols": + x.XfrmInNoPols = value + case "XfrmInPolBlock": + x.XfrmInPolBlock = value + case "XfrmInPolError": + x.XfrmInPolError = value + case "XfrmOutError": + x.XfrmOutError = value + case "XfrmInStateMismatch": + x.XfrmInStateMismatch = value + case "XfrmOutBundleGenError": + x.XfrmOutBundleGenError = value + case "XfrmOutBundleCheckError": + x.XfrmOutBundleCheckError = value + case "XfrmOutNoStates": + x.XfrmOutNoStates = value + case "XfrmOutStateProtoError": + x.XfrmOutStateProtoError = value + case "XfrmOutStateModeError": + x.XfrmOutStateModeError = value + case "XfrmOutStateSeqError": + x.XfrmOutStateSeqError = value + case "XfrmOutStateExpired": + x.XfrmOutStateExpired = value + case "XfrmOutPolBlock": + x.XfrmOutPolBlock = value + case "XfrmOutPolDead": + x.XfrmOutPolDead = value + case "XfrmOutPolError": + x.XfrmOutPolError = value + case "XfrmFwdHdrError": + x.XfrmFwdHdrError = value + case "XfrmOutStateInvalid": + x.XfrmOutStateInvalid = value + case "XfrmAcquireError": + x.XfrmAcquireError = value + } + + } + + return x, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go new file mode 100644 index 000000000..2bc0ef342 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/parse.go @@ -0,0 +1,330 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package xfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseStats parses a Stats from an input io.Reader, using the format +// found in /proc/fs/xfs/stat. +func ParseStats(r io.Reader) (*Stats, error) { + const ( + // Fields parsed into stats structures. 
+ fieldExtentAlloc = "extent_alloc" + fieldAbt = "abt" + fieldBlkMap = "blk_map" + fieldBmbt = "bmbt" + fieldDir = "dir" + fieldTrans = "trans" + fieldIg = "ig" + fieldLog = "log" + fieldRw = "rw" + fieldAttr = "attr" + fieldIcluster = "icluster" + fieldVnodes = "vnodes" + fieldBuf = "buf" + fieldXpc = "xpc" + + // Unimplemented at this time due to lack of documentation. + fieldPushAil = "push_ail" + fieldXstrat = "xstrat" + fieldAbtb2 = "abtb2" + fieldAbtc2 = "abtc2" + fieldBmbt2 = "bmbt2" + fieldIbt2 = "ibt2" + fieldFibt2 = "fibt2" + fieldQm = "qm" + fieldDebug = "debug" + ) + + var xfss Stats + + s := bufio.NewScanner(r) + for s.Scan() { + // Expect at least a string label and a single integer value, ex: + // - abt 0 + // - rw 1 2 + ss := strings.Fields(string(s.Bytes())) + if len(ss) < 2 { + continue + } + label := ss[0] + + // Extended precision counters are uint64 values. + if label == fieldXpc { + us, err := util.ParseUint64s(ss[1:]) + if err != nil { + return nil, err + } + + xfss.ExtendedPrecision, err = extendedPrecisionStats(us) + if err != nil { + return nil, err + } + + continue + } + + // All other counters are uint32 values. + us, err := util.ParseUint32s(ss[1:]) + if err != nil { + return nil, err + } + + switch label { + case fieldExtentAlloc: + xfss.ExtentAllocation, err = extentAllocationStats(us) + case fieldAbt: + xfss.AllocationBTree, err = btreeStats(us) + case fieldBlkMap: + xfss.BlockMapping, err = blockMappingStats(us) + case fieldBmbt: + xfss.BlockMapBTree, err = btreeStats(us) + case fieldDir: + xfss.DirectoryOperation, err = directoryOperationStats(us) + case fieldTrans: + xfss.Transaction, err = transactionStats(us) + case fieldIg: + xfss.InodeOperation, err = inodeOperationStats(us) + case fieldLog: + xfss.LogOperation, err = logOperationStats(us) + case fieldRw: + xfss.ReadWrite, err = readWriteStats(us) + case fieldAttr: + xfss.AttributeOperation, err = attributeOperationStats(us) + case fieldIcluster: + xfss.InodeClustering, err = inodeClusteringStats(us) + case fieldVnodes: + xfss.Vnode, err = vnodeStats(us) + case fieldBuf: + xfss.Buffer, err = bufferStats(us) + } + if err != nil { + return nil, err + } + } + + return &xfss, s.Err() +} + +// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. +func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { + if l := len(us); l != 4 { + return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) + } + + return ExtentAllocationStats{ + ExtentsAllocated: us[0], + BlocksAllocated: us[1], + ExtentsFreed: us[2], + BlocksFreed: us[3], + }, nil +} + +// btreeStats builds a BTreeStats from a slice of uint32s. +func btreeStats(us []uint32) (BTreeStats, error) { + if l := len(us); l != 4 { + return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) + } + + return BTreeStats{ + Lookups: us[0], + Compares: us[1], + RecordsInserted: us[2], + RecordsDeleted: us[3], + }, nil +} + +// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. 
+func blockMappingStats(us []uint32) (BlockMappingStats, error) { + if l := len(us); l != 7 { + return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) + } + + return BlockMappingStats{ + Reads: us[0], + Writes: us[1], + Unmaps: us[2], + ExtentListInsertions: us[3], + ExtentListDeletions: us[4], + ExtentListLookups: us[5], + ExtentListCompares: us[6], + }, nil +} + +// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. +func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { + if l := len(us); l != 4 { + return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) + } + + return DirectoryOperationStats{ + Lookups: us[0], + Creates: us[1], + Removes: us[2], + Getdents: us[3], + }, nil +} + +// TransactionStats builds a TransactionStats from a slice of uint32s. +func transactionStats(us []uint32) (TransactionStats, error) { + if l := len(us); l != 3 { + return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) + } + + return TransactionStats{ + Sync: us[0], + Async: us[1], + Empty: us[2], + }, nil +} + +// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. +func inodeOperationStats(us []uint32) (InodeOperationStats, error) { + if l := len(us); l != 7 { + return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) + } + + return InodeOperationStats{ + Attempts: us[0], + Found: us[1], + Recycle: us[2], + Missed: us[3], + Duplicate: us[4], + Reclaims: us[5], + AttributeChange: us[6], + }, nil +} + +// LogOperationStats builds a LogOperationStats from a slice of uint32s. +func logOperationStats(us []uint32) (LogOperationStats, error) { + if l := len(us); l != 5 { + return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) + } + + return LogOperationStats{ + Writes: us[0], + Blocks: us[1], + NoInternalBuffers: us[2], + Force: us[3], + ForceSleep: us[4], + }, nil +} + +// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. +func readWriteStats(us []uint32) (ReadWriteStats, error) { + if l := len(us); l != 2 { + return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) + } + + return ReadWriteStats{ + Read: us[0], + Write: us[1], + }, nil +} + +// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. +func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { + if l := len(us); l != 4 { + return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) + } + + return AttributeOperationStats{ + Get: us[0], + Set: us[1], + Remove: us[2], + List: us[3], + }, nil +} + +// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. +func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { + if l := len(us); l != 3 { + return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) + } + + return InodeClusteringStats{ + Iflush: us[0], + Flush: us[1], + FlushInode: us[2], + }, nil +} + +// VnodeStats builds a VnodeStats from a slice of uint32s. +func vnodeStats(us []uint32) (VnodeStats, error) { + // The attribute "Free" appears to not be available on older XFS + // stats versions. Therefore, 7 or 8 elements may appear in + // this slice. 
+ l := len(us) + if l != 7 && l != 8 { + return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) + } + + s := VnodeStats{ + Active: us[0], + Allocate: us[1], + Get: us[2], + Hold: us[3], + Release: us[4], + Reclaim: us[5], + Remove: us[6], + } + + // Skip adding free, unless it is present. The zero value will + // be used in place of an actual count. + if l == 7 { + return s, nil + } + + s.Free = us[7] + return s, nil +} + +// BufferStats builds a BufferStats from a slice of uint32s. +func bufferStats(us []uint32) (BufferStats, error) { + if l := len(us); l != 9 { + return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) + } + + return BufferStats{ + Get: us[0], + Create: us[1], + GetLocked: us[2], + GetLockedWaited: us[3], + BusyLocked: us[4], + MissLocked: us[5], + PageRetries: us[6], + PageFound: us[7], + GetRead: us[8], + }, nil +} + +// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. +func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { + if l := len(us); l != 3 { + return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) + } + + return ExtendedPrecisionStats{ + FlushBytes: us[0], + WriteBytes: us[1], + ReadBytes: us[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go new file mode 100644 index 000000000..d86794b7c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package xfs provides access to statistics exposed by the XFS filesystem. +package xfs + +// Stats contains XFS filesystem runtime statistics, parsed from +// /proc/fs/xfs/stat. +// +// The names and meanings of each statistic were taken from +// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux +// kernel source. Most counters are uint32s (same data types used in +// xfs_stats.h), but some of the "extended precision stats" are uint64s. +type Stats struct { + // The name of the filesystem used to source these statistics. + // If empty, this indicates aggregated statistics for all XFS + // filesystems on the host. + Name string + + ExtentAllocation ExtentAllocationStats + AllocationBTree BTreeStats + BlockMapping BlockMappingStats + BlockMapBTree BTreeStats + DirectoryOperation DirectoryOperationStats + Transaction TransactionStats + InodeOperation InodeOperationStats + LogOperation LogOperationStats + ReadWrite ReadWriteStats + AttributeOperation AttributeOperationStats + InodeClustering InodeClusteringStats + Vnode VnodeStats + Buffer BufferStats + ExtendedPrecision ExtendedPrecisionStats +} + +// ExtentAllocationStats contains statistics regarding XFS extent allocations. 
+type ExtentAllocationStats struct { + ExtentsAllocated uint32 + BlocksAllocated uint32 + ExtentsFreed uint32 + BlocksFreed uint32 +} + +// BTreeStats contains statistics regarding an XFS internal B-tree. +type BTreeStats struct { + Lookups uint32 + Compares uint32 + RecordsInserted uint32 + RecordsDeleted uint32 +} + +// BlockMappingStats contains statistics regarding XFS block maps. +type BlockMappingStats struct { + Reads uint32 + Writes uint32 + Unmaps uint32 + ExtentListInsertions uint32 + ExtentListDeletions uint32 + ExtentListLookups uint32 + ExtentListCompares uint32 +} + +// DirectoryOperationStats contains statistics regarding XFS directory entries. +type DirectoryOperationStats struct { + Lookups uint32 + Creates uint32 + Removes uint32 + Getdents uint32 +} + +// TransactionStats contains statistics regarding XFS metadata transactions. +type TransactionStats struct { + Sync uint32 + Async uint32 + Empty uint32 +} + +// InodeOperationStats contains statistics regarding XFS inode operations. +type InodeOperationStats struct { + Attempts uint32 + Found uint32 + Recycle uint32 + Missed uint32 + Duplicate uint32 + Reclaims uint32 + AttributeChange uint32 +} + +// LogOperationStats contains statistics regarding the XFS log buffer. +type LogOperationStats struct { + Writes uint32 + Blocks uint32 + NoInternalBuffers uint32 + Force uint32 + ForceSleep uint32 +} + +// ReadWriteStats contains statistics regarding the number of read and write +// system calls for XFS filesystems. +type ReadWriteStats struct { + Read uint32 + Write uint32 +} + +// AttributeOperationStats contains statistics regarding manipulation of +// XFS extended file attributes. +type AttributeOperationStats struct { + Get uint32 + Set uint32 + Remove uint32 + List uint32 +} + +// InodeClusteringStats contains statistics regarding XFS inode clustering +// operations. +type InodeClusteringStats struct { + Iflush uint32 + Flush uint32 + FlushInode uint32 +} + +// VnodeStats contains statistics regarding XFS vnode operations. +type VnodeStats struct { + Active uint32 + Allocate uint32 + Get uint32 + Hold uint32 + Release uint32 + Reclaim uint32 + Remove uint32 + Free uint32 +} + +// BufferStats contains statistics regarding XFS read/write I/O buffers. +type BufferStats struct { + Get uint32 + Create uint32 + GetLocked uint32 + GetLockedWaited uint32 + BusyLocked uint32 + MissLocked uint32 + PageRetries uint32 + PageFound uint32 + GetRead uint32 +} + +// ExtendedPrecisionStats contains high precision counters used to track the +// total number of bytes read, written, or flushed, during XFS operations. +type ExtendedPrecisionStats struct { + FlushBytes uint64 + WriteBytes uint64 + ReadBytes uint64 +} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go deleted file mode 100644 index 2c8568c1f..000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package announced contains tools for announcing API group factories. This is -// distinct from registration (in the 'registered' package) in that it's safe -// to announce every possible group linked in, but only groups requested at -// runtime should be registered. This package contains both a registry, and -// factory code (which was formerly copy-pasta in every install package). -package announced - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" -) - -// APIGroupFactoryRegistry allows for groups and versions to announce themselves, -// which simply makes them available and doesn't take other actions. Later, -// users of the registry can select which groups and versions they'd actually -// like to register with an APIRegistrationManager. -// -// (Right now APIRegistrationManager has separate 'registration' and 'enabled' -// concepts-- APIGroupFactory is going to take over the former function; -// they will overlap untill the refactoring is finished.) -// -// The key is the group name. After initialization, this should be treated as -// read-only. It is implemented as a map from group name to group factory, and -// it is safe to use this knowledge to manually pick out groups to register -// (e.g., for testing). -type APIGroupFactoryRegistry map[string]*GroupMetaFactory - -func (gar APIGroupFactoryRegistry) group(groupName string) *GroupMetaFactory { - gmf, ok := gar[groupName] - if !ok { - gmf = &GroupMetaFactory{VersionArgs: map[string]*GroupVersionFactoryArgs{}} - gar[groupName] = gmf - } - return gmf -} - -// AnnounceGroupVersion adds the particular arguments for this group version to the group factory. -func (gar APIGroupFactoryRegistry) AnnounceGroupVersion(gvf *GroupVersionFactoryArgs) error { - gmf := gar.group(gvf.GroupName) - if _, ok := gmf.VersionArgs[gvf.VersionName]; ok { - return fmt.Errorf("version %q in group %q has already been announced", gvf.VersionName, gvf.GroupName) - } - gmf.VersionArgs[gvf.VersionName] = gvf - return nil -} - -// AnnounceGroup adds the group-wide arguments to the group factory. -func (gar APIGroupFactoryRegistry) AnnounceGroup(args *GroupMetaFactoryArgs) error { - gmf := gar.group(args.GroupName) - if gmf.GroupArgs != nil { - return fmt.Errorf("group %q has already been announced", args.GroupName) - } - gmf.GroupArgs = args - return nil -} - -// RegisterAndEnableAll throws every factory at the specified API registration -// manager, and lets it decide which to register. (If you want to do this a la -// cart, you may look through gar itself-- it's just a map.) -func (gar APIGroupFactoryRegistry) RegisterAndEnableAll(m *registered.APIRegistrationManager, scheme *runtime.Scheme) error { - for groupName, gmf := range gar { - if err := gmf.Register(m); err != nil { - return fmt.Errorf("error registering %v: %v", groupName, err) - } - if err := gmf.Enable(m, scheme); err != nil { - return fmt.Errorf("error enabling %v: %v", groupName, err) - } - } - return nil -} - -// AnnouncePreconstructedFactory announces a factory which you've manually assembled. -// You may call this instead of calling AnnounceGroup and AnnounceGroupVersion. 
-func (gar APIGroupFactoryRegistry) AnnouncePreconstructedFactory(gmf *GroupMetaFactory) error { - name := gmf.GroupArgs.GroupName - if _, exists := gar[name]; exists { - return fmt.Errorf("the group %q has already been announced.", name) - } - gar[name] = gmf - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go deleted file mode 100644 index 154ed08f5..000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go +++ /dev/null @@ -1,255 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package announced - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apimachinery" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" -) - -type SchemeFunc func(*runtime.Scheme) error -type VersionToSchemeFunc map[string]SchemeFunc - -// GroupVersionFactoryArgs contains all the per-version parts of a GroupMetaFactory. -type GroupVersionFactoryArgs struct { - GroupName string - VersionName string - - AddToScheme SchemeFunc -} - -// GroupMetaFactoryArgs contains the group-level args of a GroupMetaFactory. -type GroupMetaFactoryArgs struct { - // GroupName is the name of the API-Group - // - // example: 'servicecatalog.k8s.io' - GroupName string - VersionPreferenceOrder []string - // RootScopedKinds are resources that are not namespaced. - RootScopedKinds sets.String // nil is allowed - IgnoredKinds sets.String // nil is allowed - - // May be nil if there are no internal objects. - AddInternalObjectsToScheme SchemeFunc -} - -// NewGroupMetaFactory builds the args for you. This is for if you're -// constructing a factory all at once and not using the registry. -func NewGroupMetaFactory(groupArgs *GroupMetaFactoryArgs, versions VersionToSchemeFunc) *GroupMetaFactory { - gmf := &GroupMetaFactory{ - GroupArgs: groupArgs, - VersionArgs: map[string]*GroupVersionFactoryArgs{}, - } - for v, f := range versions { - gmf.VersionArgs[v] = &GroupVersionFactoryArgs{ - GroupName: groupArgs.GroupName, - VersionName: v, - AddToScheme: f, - } - } - return gmf -} - -// Announce adds this Group factory to the global factory registry. It should -// only be called if you constructed the GroupMetaFactory yourself via -// NewGroupMetaFactory. -// Note that this will panic on an error, since it's expected that you'll be -// calling this at initialization time and any error is a result of a -// programmer importing the wrong set of packages. If this assumption doesn't -// work for you, just call DefaultGroupFactoryRegistry.AnnouncePreconstructedFactory -// yourself. 
-func (gmf *GroupMetaFactory) Announce(groupFactoryRegistry APIGroupFactoryRegistry) *GroupMetaFactory { - if err := groupFactoryRegistry.AnnouncePreconstructedFactory(gmf); err != nil { - panic(err) - } - return gmf -} - -// GroupMetaFactory has the logic for actually assembling and registering a group. -// -// There are two ways of obtaining one of these. -// 1. You can announce your group and versions separately, and then let the -// GroupFactoryRegistry assemble this object for you. (This allows group and -// versions to be imported separately, without referencing each other, to -// keep import trees small.) -// 2. You can call NewGroupMetaFactory(), which is mostly a drop-in replacement -// for the old, bad way of doing things. You can then call .Announce() to -// announce your constructed factory to any code that would like to do -// things the new, better way. -// -// Note that GroupMetaFactory actually does construct GroupMeta objects, but -// currently it does so in a way that's very entangled with an -// APIRegistrationManager. It's a TODO item to cleanly separate that interface. -type GroupMetaFactory struct { - GroupArgs *GroupMetaFactoryArgs - // map of version name to version factory - VersionArgs map[string]*GroupVersionFactoryArgs - - // assembled by Register() - prioritizedVersionList []schema.GroupVersion -} - -// Register constructs the finalized prioritized version list and sanity checks -// the announced group & versions. Then it calls register. -func (gmf *GroupMetaFactory) Register(m *registered.APIRegistrationManager) error { - if gmf.GroupArgs == nil { - return fmt.Errorf("partially announced groups are not allowed, only got versions: %#v", gmf.VersionArgs) - } - if len(gmf.VersionArgs) == 0 { - return fmt.Errorf("group %v announced but no versions announced", gmf.GroupArgs.GroupName) - } - - pvSet := sets.NewString(gmf.GroupArgs.VersionPreferenceOrder...) - if pvSet.Len() != len(gmf.GroupArgs.VersionPreferenceOrder) { - return fmt.Errorf("preference order for group %v has duplicates: %v", gmf.GroupArgs.GroupName, gmf.GroupArgs.VersionPreferenceOrder) - } - prioritizedVersions := []schema.GroupVersion{} - for _, v := range gmf.GroupArgs.VersionPreferenceOrder { - prioritizedVersions = append( - prioritizedVersions, - schema.GroupVersion{ - Group: gmf.GroupArgs.GroupName, - Version: v, - }, - ) - } - - // Go through versions that weren't explicitly prioritized. - unprioritizedVersions := []schema.GroupVersion{} - for _, v := range gmf.VersionArgs { - if v.GroupName != gmf.GroupArgs.GroupName { - return fmt.Errorf("found %v/%v in group %v?", v.GroupName, v.VersionName, gmf.GroupArgs.GroupName) - } - if pvSet.Has(v.VersionName) { - pvSet.Delete(v.VersionName) - continue - } - unprioritizedVersions = append(unprioritizedVersions, schema.GroupVersion{Group: v.GroupName, Version: v.VersionName}) - } - if len(unprioritizedVersions) > 1 { - glog.Warningf("group %v has multiple unprioritized versions: %#v. They will have an arbitrary preference order!", gmf.GroupArgs.GroupName, unprioritizedVersions) - } - if pvSet.Len() != 0 { - return fmt.Errorf("group %v has versions in the priority list that were never announced: %s", gmf.GroupArgs.GroupName, pvSet) - } - prioritizedVersions = append(prioritizedVersions, unprioritizedVersions...) 
- m.RegisterVersions(prioritizedVersions) - gmf.prioritizedVersionList = prioritizedVersions - return nil -} - -func (gmf *GroupMetaFactory) newRESTMapper(scheme *runtime.Scheme, externalVersions []schema.GroupVersion, groupMeta *apimachinery.GroupMeta) meta.RESTMapper { - // the list of kinds that are scoped at the root of the api hierarchy - // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := sets.NewString() - if gmf.GroupArgs.RootScopedKinds != nil { - rootScoped = gmf.GroupArgs.RootScopedKinds - } - ignoredKinds := sets.NewString() - if gmf.GroupArgs.IgnoredKinds != nil { - ignoredKinds = gmf.GroupArgs.IgnoredKinds - } - - mapper := meta.NewDefaultRESTMapper(externalVersions, groupMeta.InterfacesFor) - for _, gv := range externalVersions { - for kind := range scheme.KnownTypes(gv) { - if ignoredKinds.Has(kind) { - continue - } - scope := meta.RESTScopeNamespace - if rootScoped.Has(kind) { - scope = meta.RESTScopeRoot - } - mapper.Add(gv.WithKind(kind), scope) - } - } - - return mapper -} - -// Enable enables group versions that are allowed, adds methods to the scheme, etc. -func (gmf *GroupMetaFactory) Enable(m *registered.APIRegistrationManager, scheme *runtime.Scheme) error { - externalVersions := []schema.GroupVersion{} - for _, v := range gmf.prioritizedVersionList { - if !m.IsAllowedVersion(v) { - continue - } - externalVersions = append(externalVersions, v) - if err := m.EnableVersions(v); err != nil { - return err - } - gmf.VersionArgs[v.Version].AddToScheme(scheme) - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", gmf.GroupArgs.GroupName) - return nil - } - - if gmf.GroupArgs.AddInternalObjectsToScheme != nil { - gmf.GroupArgs.AddInternalObjectsToScheme(scheme) - } - - preferredExternalVersion := externalVersions[0] - accessor := meta.NewAccessor() - - groupMeta := &apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - SelfLinker: runtime.SelfLinker(accessor), - } - for _, v := range externalVersions { - gvf := gmf.VersionArgs[v.Version] - if err := groupMeta.AddVersionInterfaces( - schema.GroupVersion{Group: gvf.GroupName, Version: gvf.VersionName}, - &meta.VersionInterfaces{ - ObjectConvertor: scheme, - MetadataAccessor: accessor, - }, - ); err != nil { - return err - } - } - groupMeta.InterfacesFor = groupMeta.DefaultInterfacesFor - groupMeta.RESTMapper = gmf.newRESTMapper(scheme, externalVersions, groupMeta) - - if err := m.RegisterGroup(*groupMeta); err != nil { - return err - } - return nil -} - -// RegisterAndEnable is provided only to allow this code to get added in multiple steps. -// It's really bad that this is called in init() methods, but supporting this -// temporarily lets us do the change incrementally. -func (gmf *GroupMetaFactory) RegisterAndEnable(registry *registered.APIRegistrationManager, scheme *runtime.Scheme) error { - if err := gmf.Register(registry); err != nil { - return err - } - if err := gmf.Enable(registry, scheme); err != nil { - return err - } - - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go deleted file mode 100644 index b238454b2..000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package apimachinery contains the generic API machinery code that -// is common to both server and clients. -// This package should never import specific API objects. -package apimachinery // import "k8s.io/apimachinery/pkg/apimachinery" diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go deleted file mode 100644 index 5150db016..000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go +++ /dev/null @@ -1,336 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package to keep track of API Versions that can be registered and are enabled in api.Scheme. -package registered - -import ( - "fmt" - "sort" - "strings" - - "github.com/golang/glog" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apimachinery" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" -) - -// APIRegistrationManager provides the concept of what API groups are enabled. -// -// TODO: currently, it also provides a "registered" concept. But it's wrong to -// have both concepts in the same object. Therefore the "announced" package is -// going to take over the registered concept. After all the install packages -// are switched to using the announce package instead of this package, then we -// can combine the registered/enabled concepts in this object. Simplifying this -// isn't easy right now because there are so many callers of this package. -type APIRegistrationManager struct { - // registeredGroupVersions stores all API group versions for which RegisterGroup is called. - registeredVersions map[schema.GroupVersion]struct{} - - // enabledVersions represents all enabled API versions. It should be a - // subset of registeredVersions. Please call EnableVersions() to add - // enabled versions. - enabledVersions map[schema.GroupVersion]struct{} - - // map of group meta for all groups. - groupMetaMap map[string]*apimachinery.GroupMeta - - // envRequestedVersions represents the versions requested via the - // KUBE_API_VERSIONS environment variable. The install package of each group - // checks this list before add their versions to the latest package and - // Scheme. This list is small and order matters, so represent as a slice - envRequestedVersions []schema.GroupVersion -} - -// NewAPIRegistrationManager constructs a new manager. The argument ought to be -// the value of the KUBE_API_VERSIONS env var, or a value of this which you -// wish to test. 
-func NewAPIRegistrationManager(kubeAPIVersions string) (*APIRegistrationManager, error) { - m := &APIRegistrationManager{ - registeredVersions: map[schema.GroupVersion]struct{}{}, - enabledVersions: map[schema.GroupVersion]struct{}{}, - groupMetaMap: map[string]*apimachinery.GroupMeta{}, - envRequestedVersions: []schema.GroupVersion{}, - } - - if len(kubeAPIVersions) != 0 { - for _, version := range strings.Split(kubeAPIVersions, ",") { - gv, err := schema.ParseGroupVersion(version) - if err != nil { - return nil, fmt.Errorf("invalid api version: %s in KUBE_API_VERSIONS: %s.", - version, kubeAPIVersions) - } - m.envRequestedVersions = append(m.envRequestedVersions, gv) - } - } - return m, nil -} - -func NewOrDie(kubeAPIVersions string) *APIRegistrationManager { - m, err := NewAPIRegistrationManager(kubeAPIVersions) - if err != nil { - glog.Fatalf("Could not construct version manager: %v (KUBE_API_VERSIONS=%q)", err, kubeAPIVersions) - } - return m -} - -// RegisterVersions adds the given group versions to the list of registered group versions. -func (m *APIRegistrationManager) RegisterVersions(availableVersions []schema.GroupVersion) { - for _, v := range availableVersions { - m.registeredVersions[v] = struct{}{} - } -} - -// RegisterGroup adds the given group to the list of registered groups. -func (m *APIRegistrationManager) RegisterGroup(groupMeta apimachinery.GroupMeta) error { - groupName := groupMeta.GroupVersion.Group - if _, found := m.groupMetaMap[groupName]; found { - return fmt.Errorf("group %q is already registered in groupsMap: %v", groupName, m.groupMetaMap) - } - m.groupMetaMap[groupName] = &groupMeta - return nil -} - -// EnableVersions adds the versions for the given group to the list of enabled versions. -// Note that the caller should call RegisterGroup before calling this method. -// The caller of this function is responsible to add the versions to scheme and RESTMapper. -func (m *APIRegistrationManager) EnableVersions(versions ...schema.GroupVersion) error { - var unregisteredVersions []schema.GroupVersion - for _, v := range versions { - if _, found := m.registeredVersions[v]; !found { - unregisteredVersions = append(unregisteredVersions, v) - } - m.enabledVersions[v] = struct{}{} - } - if len(unregisteredVersions) != 0 { - return fmt.Errorf("Please register versions before enabling them: %v", unregisteredVersions) - } - return nil -} - -// IsAllowedVersion returns if the version is allowed by the KUBE_API_VERSIONS -// environment variable. If the environment variable is empty, then it always -// returns true. -func (m *APIRegistrationManager) IsAllowedVersion(v schema.GroupVersion) bool { - if len(m.envRequestedVersions) == 0 { - return true - } - for _, envGV := range m.envRequestedVersions { - if v == envGV { - return true - } - } - return false -} - -// IsEnabledVersion returns if a version is enabled. -func (m *APIRegistrationManager) IsEnabledVersion(v schema.GroupVersion) bool { - _, found := m.enabledVersions[v] - return found -} - -// EnabledVersions returns all enabled versions. 
Groups are randomly ordered, but versions within groups -// are priority order from best to worst -func (m *APIRegistrationManager) EnabledVersions() []schema.GroupVersion { - ret := []schema.GroupVersion{} - for _, groupMeta := range m.groupMetaMap { - for _, version := range groupMeta.GroupVersions { - if m.IsEnabledVersion(version) { - ret = append(ret, version) - } - } - } - return ret -} - -// EnabledVersionsForGroup returns all enabled versions for a group in order of best to worst -func (m *APIRegistrationManager) EnabledVersionsForGroup(group string) []schema.GroupVersion { - groupMeta, ok := m.groupMetaMap[group] - if !ok { - return []schema.GroupVersion{} - } - - ret := []schema.GroupVersion{} - for _, version := range groupMeta.GroupVersions { - if m.IsEnabledVersion(version) { - ret = append(ret, version) - } - } - return ret -} - -// Group returns the metadata of a group if the group is registered, otherwise -// an error is returned. -func (m *APIRegistrationManager) Group(group string) (*apimachinery.GroupMeta, error) { - groupMeta, found := m.groupMetaMap[group] - if !found { - return nil, fmt.Errorf("group %v has not been registered", group) - } - groupMetaCopy := *groupMeta - return &groupMetaCopy, nil -} - -// IsRegistered takes a string and determines if it's one of the registered groups -func (m *APIRegistrationManager) IsRegistered(group string) bool { - _, found := m.groupMetaMap[group] - return found -} - -// IsRegisteredVersion returns if a version is registered. -func (m *APIRegistrationManager) IsRegisteredVersion(v schema.GroupVersion) bool { - _, found := m.registeredVersions[v] - return found -} - -// RegisteredGroupVersions returns all registered group versions. -func (m *APIRegistrationManager) RegisteredGroupVersions() []schema.GroupVersion { - ret := []schema.GroupVersion{} - for groupVersion := range m.registeredVersions { - ret = append(ret, groupVersion) - } - return ret -} - -// InterfacesFor is a union meta.VersionInterfacesFunc func for all registered types -func (m *APIRegistrationManager) InterfacesFor(version schema.GroupVersion) (*meta.VersionInterfaces, error) { - groupMeta, err := m.Group(version.Group) - if err != nil { - return nil, err - } - return groupMeta.InterfacesFor(version) -} - -// TODO: This is an expedient function, because we don't check if a Group is -// supported throughout the code base. We will abandon this function and -// checking the error returned by the Group() function. -func (m *APIRegistrationManager) GroupOrDie(group string) *apimachinery.GroupMeta { - groupMeta, found := m.groupMetaMap[group] - if !found { - if group == "" { - panic("The legacy v1 API is not registered.") - } else { - panic(fmt.Sprintf("Group %s is not registered.", group)) - } - } - groupMetaCopy := *groupMeta - return &groupMetaCopy -} - -// RESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order: -// 1. if KUBE_API_VERSIONS is specified, then KUBE_API_VERSIONS in order, OR -// 1. legacy kube group preferred version, extensions preferred version, metrics perferred version, legacy -// kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version, -// all other groups alphabetical. 
-func (m *APIRegistrationManager) RESTMapper(versionPatterns ...schema.GroupVersion) meta.RESTMapper { - unionMapper := meta.MultiRESTMapper{} - unionedGroups := sets.NewString() - for enabledVersion := range m.enabledVersions { - if !unionedGroups.Has(enabledVersion.Group) { - unionedGroups.Insert(enabledVersion.Group) - groupMeta := m.groupMetaMap[enabledVersion.Group] - unionMapper = append(unionMapper, groupMeta.RESTMapper) - } - } - - if len(versionPatterns) != 0 { - resourcePriority := []schema.GroupVersionResource{} - kindPriority := []schema.GroupVersionKind{} - for _, versionPriority := range versionPatterns { - resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) - } - - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} - } - - if len(m.envRequestedVersions) != 0 { - resourcePriority := []schema.GroupVersionResource{} - kindPriority := []schema.GroupVersionKind{} - - for _, versionPriority := range m.envRequestedVersions { - resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) - } - - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} - } - - prioritizedGroups := []string{"", "extensions", "metrics"} - resourcePriority, kindPriority := m.prioritiesForGroups(prioritizedGroups...) - - prioritizedGroupsSet := sets.NewString(prioritizedGroups...) - remainingGroups := sets.String{} - for enabledVersion := range m.enabledVersions { - if !prioritizedGroupsSet.Has(enabledVersion.Group) { - remainingGroups.Insert(enabledVersion.Group) - } - } - - remainingResourcePriority, remainingKindPriority := m.prioritiesForGroups(remainingGroups.List()...) - resourcePriority = append(resourcePriority, remainingResourcePriority...) - kindPriority = append(kindPriority, remainingKindPriority...) - - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} -} - -// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first, -// then any non-preferred version of the group second. -func (m *APIRegistrationManager) prioritiesForGroups(groups ...string) ([]schema.GroupVersionResource, []schema.GroupVersionKind) { - resourcePriority := []schema.GroupVersionResource{} - kindPriority := []schema.GroupVersionKind{} - - for _, group := range groups { - availableVersions := m.EnabledVersionsForGroup(group) - if len(availableVersions) > 0 { - resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind)) - } - } - for _, group := range groups { - resourcePriority = append(resourcePriority, schema.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) - kindPriority = append(kindPriority, schema.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) - } - - return resourcePriority, kindPriority -} - -// AllPreferredGroupVersions returns the preferred versions of all registered -// groups in the form of "group1/version1,group2/version2,..." 
-func (m *APIRegistrationManager) AllPreferredGroupVersions() string { - if len(m.groupMetaMap) == 0 { - return "" - } - var defaults []string - for _, groupMeta := range m.groupMetaMap { - defaults = append(defaults, groupMeta.GroupVersion.String()) - } - sort.Strings(defaults) - return strings.Join(defaults, ",") -} - -// ValidateEnvRequestedVersions returns a list of versions that are requested in -// the KUBE_API_VERSIONS environment variable, but not enabled. -func (m *APIRegistrationManager) ValidateEnvRequestedVersions() []schema.GroupVersion { - var missingVersions []schema.GroupVersion - for _, v := range m.envRequestedVersions { - if _, found := m.enabledVersions[v]; !found { - missingVersions = append(missingVersions, v) - } - } - return missingVersions -} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go deleted file mode 100644 index 213e34bc0..000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apimachinery - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupMeta stores the metadata of a group. -type GroupMeta struct { - // GroupVersion represents the preferred version of the group. - GroupVersion schema.GroupVersion - - // GroupVersions is Group + all versions in that group. - GroupVersions []schema.GroupVersion - - // SelfLinker can set or get the SelfLink field of all API types. - // TODO: when versioning changes, make this part of each API definition. - // TODO(lavalamp): Combine SelfLinker & ResourceVersioner interfaces, force all uses - // to go through the InterfacesFor method below. - SelfLinker runtime.SelfLinker - - // RESTMapper provides the default mapping between REST paths and the objects declared in api.Scheme and all known - // versions. - RESTMapper meta.RESTMapper - - // InterfacesFor returns the default Codec and ResourceVersioner for a given version - // string, or an error if the version is not known. - // TODO: make this stop being a func pointer and always use the default - // function provided below once every place that populates this field has been changed. - InterfacesFor func(version schema.GroupVersion) (*meta.VersionInterfaces, error) - - // InterfacesByVersion stores the per-version interfaces. - InterfacesByVersion map[schema.GroupVersion]*meta.VersionInterfaces -} - -// DefaultInterfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. -// TODO: Remove the "Default" prefix. 
-func (gm *GroupMeta) DefaultInterfacesFor(version schema.GroupVersion) (*meta.VersionInterfaces, error) { - if v, ok := gm.InterfacesByVersion[version]; ok { - return v, nil - } - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, gm.GroupVersions) -} - -// AddVersionInterfaces adds the given version to the group. Only call during -// init, after that GroupMeta objects should be immutable. Not thread safe. -// (If you use this, be sure to set .InterfacesFor = .DefaultInterfacesFor) -// TODO: remove the "Interfaces" suffix and make this also maintain the -// .GroupVersions member. -func (gm *GroupMeta) AddVersionInterfaces(version schema.GroupVersion, interfaces *meta.VersionInterfaces) error { - if e, a := gm.GroupVersion.Group, version.Group; a != e { - return fmt.Errorf("got a version in group %v, but am in group %v", a, e) - } - if gm.InterfacesByVersion == nil { - gm.InterfacesByVersion = make(map[schema.GroupVersion]*meta.VersionInterfaces) - } - gm.InterfacesByVersion[version] = interfaces - - // TODO: refactor to make the below error not possible, this function - // should *set* GroupVersions rather than depend on it. - for _, v := range gm.GroupVersions { - if v == version { - return nil - } - } - return fmt.Errorf("added a version interface without the corresponding version %v being in the list %#v", version, gm.GroupVersions) -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go new file mode 100644 index 000000000..9a09fe54d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go @@ -0,0 +1,83 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "sync" +) + +const ( + shardsCount int = 32 +) + +type Cache []*cacheShard + +func NewCache(maxSize int) Cache { + if maxSize < shardsCount { + maxSize = shardsCount + } + cache := make(Cache, shardsCount) + for i := 0; i < shardsCount; i++ { + cache[i] = &cacheShard{ + items: make(map[uint64]interface{}), + maxSize: maxSize / shardsCount, + } + } + return cache +} + +func (c Cache) getShard(index uint64) *cacheShard { + return c[index%uint64(shardsCount)] +} + +// Returns true if object already existed, false otherwise. +func (c *Cache) Add(index uint64, obj interface{}) bool { + return c.getShard(index).add(index, obj) +} + +func (c *Cache) Get(index uint64) (obj interface{}, found bool) { + return c.getShard(index).get(index) +} + +type cacheShard struct { + items map[uint64]interface{} + sync.RWMutex + maxSize int +} + +// Returns true if object already existed, false otherwise. 
+func (s *cacheShard) add(index uint64, obj interface{}) bool { + s.Lock() + defer s.Unlock() + _, isOverwrite := s.items[index] + if !isOverwrite && len(s.items) >= s.maxSize { + var randomKey uint64 + for randomKey = range s.items { + break + } + delete(s.items, randomKey) + } + s.items[index] = obj + return isOverwrite +} + +func (s *cacheShard) get(index uint64) (obj interface{}, found bool) { + s.RLock() + defer s.RUnlock() + obj, found = s.items[index] + return +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go new file mode 100644 index 000000000..f6b307aa6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go @@ -0,0 +1,102 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "sync" + "time" + + "github.com/hashicorp/golang-lru" +) + +// Clock defines an interface for obtaining the current time +type Clock interface { + Now() time.Time +} + +// realClock implements the Clock interface by calling time.Now() +type realClock struct{} + +func (realClock) Now() time.Time { return time.Now() } + +// LRUExpireCache is a cache that ensures the mostly recently accessed keys are returned with +// a ttl beyond which keys are forcibly expired. +type LRUExpireCache struct { + // clock is used to obtain the current time + clock Clock + + cache *lru.Cache + lock sync.Mutex +} + +// NewLRUExpireCache creates an expiring cache with the given size +func NewLRUExpireCache(maxSize int) *LRUExpireCache { + return NewLRUExpireCacheWithClock(maxSize, realClock{}) +} + +// NewLRUExpireCacheWithClock creates an expiring cache with the given size, using the specified clock to obtain the current time. +func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache { + cache, err := lru.New(maxSize) + if err != nil { + // if called with an invalid size + panic(err) + } + return &LRUExpireCache{clock: clock, cache: cache} +} + +type cacheEntry struct { + value interface{} + expireTime time.Time +} + +// Add adds the value to the cache at key with the specified maximum duration. +func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Duration) { + c.lock.Lock() + defer c.lock.Unlock() + c.cache.Add(key, &cacheEntry{value, c.clock.Now().Add(ttl)}) +} + +// Get returns the value at the specified key from the cache if it exists and is not +// expired, or returns false. 
+func (c *LRUExpireCache) Get(key interface{}) (interface{}, bool) { + c.lock.Lock() + defer c.lock.Unlock() + e, ok := c.cache.Get(key) + if !ok { + return nil, false + } + if c.clock.Now().After(e.(*cacheEntry).expireTime) { + c.cache.Remove(key) + return nil, false + } + return e.(*cacheEntry).value, true +} + +// Remove removes the specified key from the cache if it exists +func (c *LRUExpireCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + c.cache.Remove(key) +} + +// Keys returns all the keys in the cache, even if they are expired. Subsequent calls to +// get may return not found. It returns all keys from oldest to newest. +func (c *LRUExpireCache) Keys() []interface{} { + c.lock.Lock() + defer c.lock.Unlock() + return c.cache.Keys() +} diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go new file mode 100644 index 000000000..028c75e8e --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -0,0 +1,394 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" +) + +// Config contains all the settings for a Controller. +type Config struct { + // The queue for your objects; either a FIFO or + // a DeltaFIFO. Your Process() function should accept + // the output of this Queue's Pop() method. + Queue + + // Something that can list and watch your objects. + ListerWatcher + + // Something that can process your objects. + Process ProcessFunc + + // The type of your objects. + ObjectType runtime.Object + + // Reprocess everything at least this often. + // Note that if it takes longer for you to clear the queue than this + // period, you will end up processing items in the order determined + // by FIFO.Replace(). Currently, this is random. If this is a + // problem, we can change that replacement policy to append new + // things to the end of the queue instead of replacing the entire + // queue. + FullResyncPeriod time.Duration + + // ShouldResync, if specified, is invoked when the controller's reflector determines the next + // periodic sync should occur. If this returns true, it means the reflector should proceed with + // the resync. + ShouldResync ShouldResyncFunc + + // If true, when Process() returns an error, re-enqueue the object. + // TODO: add interface to let you inject a delay/backoff or drop + // the object completely if desired. Pass the object in + // question to this interface as a parameter. + RetryOnError bool +} + +// ShouldResyncFunc is a type of function that indicates if a reflector should perform a +// resync or not. It can be used by a shared informer to support multiple event handlers with custom +// resync periods. +type ShouldResyncFunc func() bool + +// ProcessFunc processes a single object. 
+type ProcessFunc func(obj interface{}) error + +// Controller is a generic controller framework. +type controller struct { + config Config + reflector *Reflector + reflectorMutex sync.RWMutex + clock clock.Clock +} + +type Controller interface { + Run(stopCh <-chan struct{}) + HasSynced() bool + LastSyncResourceVersion() string +} + +// New makes a new Controller from the given Config. +func New(c *Config) Controller { + ctlr := &controller{ + config: *c, + clock: &clock.RealClock{}, + } + return ctlr +} + +// Run begins processing items, and will continue until a value is sent down stopCh. +// It's an error to call Run more than once. +// Run blocks; call via go. +func (c *controller) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + go func() { + <-stopCh + c.config.Queue.Close() + }() + r := NewReflector( + c.config.ListerWatcher, + c.config.ObjectType, + c.config.Queue, + c.config.FullResyncPeriod, + ) + r.ShouldResync = c.config.ShouldResync + r.clock = c.clock + + c.reflectorMutex.Lock() + c.reflector = r + c.reflectorMutex.Unlock() + + var wg wait.Group + defer wg.Wait() + + wg.StartWithChannel(stopCh, r.Run) + + wait.Until(c.processLoop, time.Second, stopCh) +} + +// Returns true once this controller has completed an initial resource listing +func (c *controller) HasSynced() bool { + return c.config.Queue.HasSynced() +} + +func (c *controller) LastSyncResourceVersion() string { + if c.reflector == nil { + return "" + } + return c.reflector.LastSyncResourceVersion() +} + +// processLoop drains the work queue. +// TODO: Consider doing the processing in parallel. This will require a little thought +// to make sure that we don't end up processing the same object multiple times +// concurrently. +// +// TODO: Plumb through the stopCh here (and down to the queue) so that this can +// actually exit when the controller is stopped. Or just give up on this stuff +// ever being stoppable. Converting this whole package to use Context would +// also be helpful. +func (c *controller) processLoop() { + for { + obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process)) + if err != nil { + if err == FIFOClosedError { + return + } + if c.config.RetryOnError { + // This is the safe way to re-enqueue. + c.config.Queue.AddIfNotPresent(obj) + } + } + } +} + +// ResourceEventHandler can handle notifications for events that happen to a +// resource. The events are informational only, so you can't return an +// error. +// * OnAdd is called when an object is added. +// * OnUpdate is called when an object is modified. Note that oldObj is the +// last known state of the object-- it is possible that several changes +// were combined together, so you can't use this to see every single +// change. OnUpdate is also called when a re-list happens, and it will +// get called even if nothing changed. This is useful for periodically +// evaluating or syncing something. +// * OnDelete will get the final state of the item if it is known, otherwise +// it will get an object of type DeletedFinalStateUnknown. This can +// happen if the watch is closed and misses the delete event and we don't +// notice the deletion until the subsequent re-list. +type ResourceEventHandler interface { + OnAdd(obj interface{}) + OnUpdate(oldObj, newObj interface{}) + OnDelete(obj interface{}) +} + +// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or +// as few of the notification functions as you want while still implementing +// ResourceEventHandler. 
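For illustration only (not part of the vendored file): ResourceEventHandler above is the notification contract the informer machinery delivers events through; it can be implemented directly, or assembled from individual functions with the ResourceEventHandlerFuncs adaptor that follows. A hypothetical direct implementation:

package example

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

// logHandler is a stand-in consumer that just logs each notification.
type logHandler struct{}

func (logHandler) OnAdd(obj interface{})               { fmt.Println("added:", obj) }
func (logHandler) OnUpdate(oldObj, newObj interface{}) { fmt.Println("updated") }
func (logHandler) OnDelete(obj interface{})            { fmt.Println("deleted:", obj) }

// Compile-time check that logHandler satisfies the interface.
var _ cache.ResourceEventHandler = logHandler{}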
+type ResourceEventHandlerFuncs struct { + AddFunc func(obj interface{}) + UpdateFunc func(oldObj, newObj interface{}) + DeleteFunc func(obj interface{}) +} + +// OnAdd calls AddFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) { + if r.AddFunc != nil { + r.AddFunc(obj) + } +} + +// OnUpdate calls UpdateFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) { + if r.UpdateFunc != nil { + r.UpdateFunc(oldObj, newObj) + } +} + +// OnDelete calls DeleteFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) { + if r.DeleteFunc != nil { + r.DeleteFunc(obj) + } +} + +// FilteringResourceEventHandler applies the provided filter to all events coming +// in, ensuring the appropriate nested handler method is invoked. An object +// that starts passing the filter after an update is considered an add, and an +// object that stops passing the filter after an update is considered a delete. +type FilteringResourceEventHandler struct { + FilterFunc func(obj interface{}) bool + Handler ResourceEventHandler +} + +// OnAdd calls the nested handler only if the filter succeeds +func (r FilteringResourceEventHandler) OnAdd(obj interface{}) { + if !r.FilterFunc(obj) { + return + } + r.Handler.OnAdd(obj) +} + +// OnUpdate ensures the proper handler is called depending on whether the filter matches +func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) { + newer := r.FilterFunc(newObj) + older := r.FilterFunc(oldObj) + switch { + case newer && older: + r.Handler.OnUpdate(oldObj, newObj) + case newer && !older: + r.Handler.OnAdd(newObj) + case !newer && older: + r.Handler.OnDelete(oldObj) + default: + // do nothing + } +} + +// OnDelete calls the nested handler only if the filter succeeds +func (r FilteringResourceEventHandler) OnDelete(obj interface{}) { + if !r.FilterFunc(obj) { + return + } + r.Handler.OnDelete(obj) +} + +// DeletionHandlingMetaNamespaceKeyFunc checks for +// DeletedFinalStateUnknown objects before calling +// MetaNamespaceKeyFunc. +func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) { + if d, ok := obj.(DeletedFinalStateUnknown); ok { + return d.Key, nil + } + return MetaNamespaceKeyFunc(obj) +} + +// NewInformer returns a Store and a controller for populating the store +// while also providing event notifications. You should only used the returned +// Store for Get/List operations; Add/Modify/Deletes will cause the event +// notifications to be faulty. +// +// Parameters: +// * lw is list and watch functions for the source of the resource you want to +// be informed of. +// * objType is an object of the type that you expect to receive. +// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// * h is the object you want notifications sent to. +// +func NewInformer( + lw ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, +) (Store, Controller) { + // This will hold the client state, as we know it. + clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) + + // This will hold incoming changes. Note how we pass clientState in as a + // KeyLister, that way resync operations will result in the correct set + // of update/delete deltas. 
+ fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: lw, + ObjectType: objType, + FullResyncPeriod: resyncPeriod, + RetryOnError: false, + + Process: func(obj interface{}) error { + // from oldest to newest + for _, d := range obj.(Deltas) { + switch d.Type { + case Sync, Added, Updated: + if old, exists, err := clientState.Get(d.Object); err == nil && exists { + if err := clientState.Update(d.Object); err != nil { + return err + } + h.OnUpdate(old, d.Object) + } else { + if err := clientState.Add(d.Object); err != nil { + return err + } + h.OnAdd(d.Object) + } + case Deleted: + if err := clientState.Delete(d.Object); err != nil { + return err + } + h.OnDelete(d.Object) + } + } + return nil + }, + } + return clientState, New(cfg) +} + +// NewIndexerInformer returns a Indexer and a controller for populating the index +// while also providing event notifications. You should only used the returned +// Index for Get/List operations; Add/Modify/Deletes will cause the event +// notifications to be faulty. +// +// Parameters: +// * lw is list and watch functions for the source of the resource you want to +// be informed of. +// * objType is an object of the type that you expect to receive. +// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// * h is the object you want notifications sent to. +// * indexers is the indexer for the received object type. +// +func NewIndexerInformer( + lw ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, + indexers Indexers, +) (Indexer, Controller) { + // This will hold the client state, as we know it. + clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) + + // This will hold incoming changes. Note how we pass clientState in as a + // KeyLister, that way resync operations will result in the correct set + // of update/delete deltas. + fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: lw, + ObjectType: objType, + FullResyncPeriod: resyncPeriod, + RetryOnError: false, + + Process: func(obj interface{}) error { + // from oldest to newest + for _, d := range obj.(Deltas) { + switch d.Type { + case Sync, Added, Updated: + if old, exists, err := clientState.Get(d.Object); err == nil && exists { + if err := clientState.Update(d.Object); err != nil { + return err + } + h.OnUpdate(old, d.Object) + } else { + if err := clientState.Add(d.Object); err != nil { + return err + } + h.OnAdd(d.Object) + } + case Deleted: + if err := clientState.Delete(d.Object); err != nil { + return err + } + h.OnDelete(d.Object) + } + } + return nil + }, + } + return clientState, New(cfg) +} diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go new file mode 100644 index 000000000..45c3b500d --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -0,0 +1,659 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
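For illustration only (not part of the vendored file): NewInformer and NewIndexerInformer above tie the pieces together — a ListerWatcher feeds a DeltaFIFO, the controller pops deltas, mirrors them into a local Store, and fans notifications out to the handler. A sketch of the usual wiring; the ListerWatcher and object type (for example &v1.Pod{}) are supplied by the caller and come from parts of the package and API not shown in this hunk:

package example

import (
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/tools/cache"
)

// startInformer keeps a local Store in sync until stop is closed and returns
// it for read-only Get/List use.
func startInformer(lw cache.ListerWatcher, objType runtime.Object, stop <-chan struct{}) cache.Store {
    store, controller := cache.NewInformer(lw, objType, 30*time.Second, cache.ResourceEventHandlerFuncs{
        AddFunc:    func(obj interface{}) { fmt.Println("added") },
        UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("updated") },
        DeleteFunc: func(obj interface{}) { fmt.Println("deleted") },
    })
    go controller.Run(stop) // Run blocks; it returns once stop is closed.
    return store
}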
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/golang/glog" +) + +// NewDeltaFIFO returns a Store which can be used process changes to items. +// +// keyFunc is used to figure out what key an object should have. (It's +// exposed in the returned DeltaFIFO's KeyOf() method, with bonus features.) +// +// 'keyLister' is expected to return a list of keys that the consumer of +// this queue "knows about". It is used to decide which items are missing +// when Replace() is called; 'Deleted' deltas are produced for these items. +// It may be nil if you don't need to detect all deletions. +// TODO: consider merging keyLister with this object, tracking a list of +// "known" keys when Pop() is called. Have to think about how that +// affects error retrying. +// NOTE: It is possible to misuse this and cause a race when using an +// external known object source. +// Whether there is a potential race depends on how the comsumer +// modifies knownObjects. In Pop(), process function is called under +// lock, so it is safe to update data structures in it that need to be +// in sync with the queue (e.g. knownObjects). +// +// Example: +// In case of sharedIndexInformer being a consumer +// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/ +// src/k8s.io/client-go/tools/cache/shared_informer.go#L192), +// there is no race as knownObjects (s.indexer) is modified safely +// under DeltaFIFO's lock. The only exceptions are GetStore() and +// GetIndexer() methods, which expose ways to modify the underlying +// storage. Currently these two methods are used for creating Lister +// and internal tests. +// +// Also see the comment on DeltaFIFO. +func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO { + f := &DeltaFIFO{ + items: map[string]Deltas{}, + queue: []string{}, + keyFunc: keyFunc, + knownObjects: knownObjects, + } + f.cond.L = &f.lock + return f +} + +// DeltaFIFO is like FIFO, but allows you to process deletes. +// +// DeltaFIFO is a producer-consumer queue, where a Reflector is +// intended to be the producer, and the consumer is whatever calls +// the Pop() method. +// +// DeltaFIFO solves this use case: +// * You want to process every object change (delta) at most once. +// * When you process an object, you want to see everything +// that's happened to it since you last processed it. +// * You want to process the deletion of objects. +// * You might want to periodically reprocess objects. +// +// DeltaFIFO's Pop(), Get(), and GetByKey() methods return +// interface{} to satisfy the Store/Queue interfaces, but it +// will always return an object of type Deltas. +// +// A note on threading: If you call Pop() in parallel from multiple +// threads, you could end up with multiple threads processing slightly +// different versions of the same object. +// +// A note on the KeyLister used by the DeltaFIFO: It's main purpose is +// to list keys that are "known", for the purpose of figuring out which +// items have been deleted when Replace() or Delete() are called. 
The deleted +// object will be included in the DeleteFinalStateUnknown markers. These objects +// could be stale. +type DeltaFIFO struct { + // lock/cond protects access to 'items' and 'queue'. + lock sync.RWMutex + cond sync.Cond + + // We depend on the property that items in the set are in + // the queue and vice versa, and that all Deltas in this + // map have at least one Delta. + items map[string]Deltas + queue []string + + // populated is true if the first batch of items inserted by Replace() has been populated + // or Delete/Add/Update was called first. + populated bool + // initialPopulationCount is the number of items inserted by the first call of Replace() + initialPopulationCount int + + // keyFunc is used to make the key used for queued item + // insertion and retrieval, and should be deterministic. + keyFunc KeyFunc + + // knownObjects list keys that are "known", for the + // purpose of figuring out which items have been deleted + // when Replace() or Delete() is called. + knownObjects KeyListerGetter + + // Indication the queue is closed. + // Used to indicate a queue is closed so a control loop can exit when a queue is empty. + // Currently, not used to gate any of CRED operations. + closed bool + closedLock sync.Mutex +} + +var ( + _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue +) + +var ( + // ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas + // object with zero length is encountered (should be impossible, + // but included for completeness). + ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key") +) + +// Close the queue. +func (f *DeltaFIFO) Close() { + f.closedLock.Lock() + defer f.closedLock.Unlock() + f.closed = true + f.cond.Broadcast() +} + +// KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or +// DeletedFinalStateUnknown objects. +func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { + if d, ok := obj.(Deltas); ok { + if len(d) == 0 { + return "", KeyError{obj, ErrZeroLengthDeltasObject} + } + obj = d.Newest().Object + } + if d, ok := obj.(DeletedFinalStateUnknown); ok { + return d.Key, nil + } + return f.keyFunc(obj) +} + +// Return true if an Add/Update/Delete/AddIfNotPresent are called first, +// or an Update called first but the first batch of items inserted by Replace() has been popped +func (f *DeltaFIFO) HasSynced() bool { + f.lock.Lock() + defer f.lock.Unlock() + return f.populated && f.initialPopulationCount == 0 +} + +// Add inserts an item, and puts it in the queue. The item is only enqueued +// if it doesn't already exist in the set. +func (f *DeltaFIFO) Add(obj interface{}) error { + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + return f.queueActionLocked(Added, obj) +} + +// Update is just like Add, but makes an Updated Delta. +func (f *DeltaFIFO) Update(obj interface{}) error { + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + return f.queueActionLocked(Updated, obj) +} + +// Delete is just like Add, but makes an Deleted Delta. If the item does not +// already exist, it will be ignored. (It may have already been deleted by a +// Replace (re-list), for example. +func (f *DeltaFIFO) Delete(obj interface{}) error { + id, err := f.KeyOf(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + if f.knownObjects == nil { + if _, exists := f.items[id]; !exists { + // Presumably, this was deleted when a relist happened. + // Don't provide a second report of the same deletion. 
+ return nil + } + } else { + // We only want to skip the "deletion" action if the object doesn't + // exist in knownObjects and it doesn't have corresponding item in items. + // Note that even if there is a "deletion" action in items, we can ignore it, + // because it will be deduped automatically in "queueActionLocked" + _, exists, err := f.knownObjects.GetByKey(id) + _, itemsExist := f.items[id] + if err == nil && !exists && !itemsExist { + // Presumably, this was deleted when a relist happened. + // Don't provide a second report of the same deletion. + return nil + } + } + + return f.queueActionLocked(Deleted, obj) +} + +// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already +// present in the set, it is neither enqueued nor added to the set. +// +// This is useful in a single producer/consumer scenario so that the consumer can +// safely retry items without contending with the producer and potentially enqueueing +// stale items. +// +// Important: obj must be a Deltas (the output of the Pop() function). Yes, this is +// different from the Add/Update/Delete functions. +func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error { + deltas, ok := obj.(Deltas) + if !ok { + return fmt.Errorf("object must be of type deltas, but got: %#v", obj) + } + id, err := f.KeyOf(deltas.Newest().Object) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.addIfNotPresent(id, deltas) + return nil +} + +// addIfNotPresent inserts deltas under id if it does not exist, and assumes the caller +// already holds the fifo lock. +func (f *DeltaFIFO) addIfNotPresent(id string, deltas Deltas) { + f.populated = true + if _, exists := f.items[id]; exists { + return + } + + f.queue = append(f.queue, id) + f.items[id] = deltas + f.cond.Broadcast() +} + +// re-listing and watching can deliver the same update multiple times in any +// order. This will combine the most recent two deltas if they are the same. +func dedupDeltas(deltas Deltas) Deltas { + n := len(deltas) + if n < 2 { + return deltas + } + a := &deltas[n-1] + b := &deltas[n-2] + if out := isDup(a, b); out != nil { + d := append(Deltas{}, deltas[:n-2]...) + return append(d, *out) + } + return deltas +} + +// If a & b represent the same event, returns the delta that ought to be kept. +// Otherwise, returns nil. +// TODO: is there anything other than deletions that need deduping? +func isDup(a, b *Delta) *Delta { + if out := isDeletionDup(a, b); out != nil { + return out + } + // TODO: Detect other duplicate situations? Are there any? + return nil +} + +// keep the one with the most information if both are deletions. +func isDeletionDup(a, b *Delta) *Delta { + if b.Type != Deleted || a.Type != Deleted { + return nil + } + // Do more sophisticated checks, or is this sufficient? + if _, ok := b.Object.(DeletedFinalStateUnknown); ok { + return a + } + return b +} + +// willObjectBeDeletedLocked returns true only if the last delta for the +// given object is Delete. Caller must lock first. +func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool { + deltas := f.items[id] + return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted +} + +// queueActionLocked appends to the delta list for the object. +// Caller must lock first. 
+func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { + id, err := f.KeyOf(obj) + if err != nil { + return KeyError{obj, err} + } + + // If object is supposed to be deleted (last event is Deleted), + // then we should ignore Sync events, because it would result in + // recreation of this object. + if actionType == Sync && f.willObjectBeDeletedLocked(id) { + return nil + } + + newDeltas := append(f.items[id], Delta{actionType, obj}) + newDeltas = dedupDeltas(newDeltas) + + _, exists := f.items[id] + if len(newDeltas) > 0 { + if !exists { + f.queue = append(f.queue, id) + } + f.items[id] = newDeltas + f.cond.Broadcast() + } else if exists { + // We need to remove this from our map (extra items + // in the queue are ignored if they are not in the + // map). + delete(f.items, id) + } + return nil +} + +// List returns a list of all the items; it returns the object +// from the most recent Delta. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) List() []interface{} { + f.lock.RLock() + defer f.lock.RUnlock() + return f.listLocked() +} + +func (f *DeltaFIFO) listLocked() []interface{} { + list := make([]interface{}, 0, len(f.items)) + for _, item := range f.items { + // Copy item's slice so operations on this slice + // won't interfere with the object we return. + item = copyDeltas(item) + list = append(list, item.Newest().Object) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently +// in the FIFO. +func (f *DeltaFIFO) ListKeys() []string { + f.lock.RLock() + defer f.lock.RUnlock() + list := make([]string, 0, len(f.items)) + for key := range f.items { + list = append(list, key) + } + return list +} + +// Get returns the complete list of deltas for the requested item, +// or sets exists=false. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) Get(obj interface{}) (item interface{}, exists bool, err error) { + key, err := f.KeyOf(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return f.GetByKey(key) +} + +// GetByKey returns the complete list of deltas for the requested item, +// setting exists=false if that list is empty. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err error) { + f.lock.RLock() + defer f.lock.RUnlock() + d, exists := f.items[key] + if exists { + // Copy item's slice so operations on this slice + // won't interfere with the object we return. + d = copyDeltas(d) + } + return d, exists, nil +} + +// Checks if the queue is closed +func (f *DeltaFIFO) IsClosed() bool { + f.closedLock.Lock() + defer f.closedLock.Unlock() + if f.closed { + return true + } + return false +} + +// Pop blocks until an item is added to the queue, and then returns it. If +// multiple items are ready, they are returned in the order in which they were +// added/updated. The item is removed from the queue (and the store) before it +// is returned, so if you don't successfully process it, you need to add it back +// with AddIfNotPresent(). +// process function is called under lock, so it is safe update data structures +// in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc +// may return an instance of ErrRequeue with a nested error to indicate the current +// item should be requeued (equivalent to calling AddIfNotPresent under the lock). 
+// +// Pop returns a 'Deltas', which has a complete list of all the things +// that happened to the object (deltas) while it was sitting in the queue. +func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { + f.lock.Lock() + defer f.lock.Unlock() + for { + for len(f.queue) == 0 { + // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. + // When Close() is called, the f.closed is set and the condition is broadcasted. + // Which causes this loop to continue and return from the Pop(). + if f.IsClosed() { + return nil, FIFOClosedError + } + + f.cond.Wait() + } + id := f.queue[0] + f.queue = f.queue[1:] + item, ok := f.items[id] + if f.initialPopulationCount > 0 { + f.initialPopulationCount-- + } + if !ok { + // Item may have been deleted subsequently. + continue + } + delete(f.items, id) + err := process(item) + if e, ok := err.(ErrRequeue); ok { + f.addIfNotPresent(id, item) + err = e.Err + } + // Don't need to copyDeltas here, because we're transferring + // ownership to the caller. + return item, err + } +} + +// Replace will delete the contents of 'f', using instead the given map. +// 'f' takes ownership of the map, you should not reference the map again +// after calling this function. f's queue is reset, too; upon return, it +// will contain the items in the map, in no particular order. +func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { + f.lock.Lock() + defer f.lock.Unlock() + keys := make(sets.String, len(list)) + + for _, item := range list { + key, err := f.KeyOf(item) + if err != nil { + return KeyError{item, err} + } + keys.Insert(key) + if err := f.queueActionLocked(Sync, item); err != nil { + return fmt.Errorf("couldn't enqueue object: %v", err) + } + } + + if f.knownObjects == nil { + // Do deletion detection against our own list. + for k, oldItem := range f.items { + if keys.Has(k) { + continue + } + var deletedObj interface{} + if n := oldItem.Newest(); n != nil { + deletedObj = n.Object + } + if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { + return err + } + } + + if !f.populated { + f.populated = true + f.initialPopulationCount = len(list) + } + + return nil + } + + // Detect deletions not already in the queue. 
+ knownKeys := f.knownObjects.ListKeys() + queuedDeletions := 0 + for _, k := range knownKeys { + if keys.Has(k) { + continue + } + + deletedObj, exists, err := f.knownObjects.GetByKey(k) + if err != nil { + deletedObj = nil + glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) + } else if !exists { + deletedObj = nil + glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) + } + queuedDeletions++ + if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { + return err + } + } + + if !f.populated { + f.populated = true + f.initialPopulationCount = len(list) + queuedDeletions + } + + return nil +} + +// Resync will send a sync event for each item +func (f *DeltaFIFO) Resync() error { + f.lock.Lock() + defer f.lock.Unlock() + + if f.knownObjects == nil { + return nil + } + + keys := f.knownObjects.ListKeys() + for _, k := range keys { + if err := f.syncKeyLocked(k); err != nil { + return err + } + } + return nil +} + +func (f *DeltaFIFO) syncKey(key string) error { + f.lock.Lock() + defer f.lock.Unlock() + + return f.syncKeyLocked(key) +} + +func (f *DeltaFIFO) syncKeyLocked(key string) error { + obj, exists, err := f.knownObjects.GetByKey(key) + if err != nil { + glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) + return nil + } else if !exists { + glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key) + return nil + } + + // If we are doing Resync() and there is already an event queued for that object, + // we ignore the Resync for it. This is to avoid the race, in which the resync + // comes with the previous value of object (since queueing an event for the object + // doesn't trigger changing the underlying store . + id, err := f.KeyOf(obj) + if err != nil { + return KeyError{obj, err} + } + if len(f.items[id]) > 0 { + return nil + } + + if err := f.queueActionLocked(Sync, obj); err != nil { + return fmt.Errorf("couldn't queue object: %v", err) + } + return nil +} + +// A KeyListerGetter is anything that knows how to list its keys and look up by key. +type KeyListerGetter interface { + KeyLister + KeyGetter +} + +// A KeyLister is anything that knows how to list its keys. +type KeyLister interface { + ListKeys() []string +} + +// A KeyGetter is anything that knows how to get the value stored under a given key. +type KeyGetter interface { + GetByKey(key string) (interface{}, bool, error) +} + +// DeltaType is the type of a change (addition, deletion, etc) +type DeltaType string + +const ( + Added DeltaType = "Added" + Updated DeltaType = "Updated" + Deleted DeltaType = "Deleted" + // The other types are obvious. You'll get Sync deltas when: + // * A watch expires/errors out and a new list/watch cycle is started. + // * You've turned on periodic syncs. + // (Anything that trigger's DeltaFIFO's Replace() method.) + Sync DeltaType = "Sync" +) + +// Delta is the type stored by a DeltaFIFO. It tells you what change +// happened, and the object's state after* that change. +// +// [*] Unless the change is a deletion, and then you'll get the final +// state of the object before it was deleted. +type Delta struct { + Type DeltaType + Object interface{} +} + +// Deltas is a list of one or more 'Delta's to an individual object. +// The oldest delta is at index 0, the newest delta is the last one. 
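For illustration only (not part of the vendored file): DeltaFIFO is the producer/consumer queue the informer is built on; the consumer side is a Pop loop that walks each object's accumulated Deltas from oldest to newest, and may return ErrRequeue (defined in fifo.go further down) to put the item back. A minimal consumer sketch — KeyFunc, Store, and MetaNamespaceKeyFunc come from the package's store.go, vendored elsewhere in this patch:

package example

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

// drain pops items until the queue is closed. Returning ErrRequeue from the
// process function re-enqueues the popped Deltas under the queue's lock.
func drain(fifo *cache.DeltaFIFO, handle func(d cache.Delta) error) {
    for {
        _, err := fifo.Pop(func(obj interface{}) error {
            for _, d := range obj.(cache.Deltas) { // oldest to newest
                if err := handle(d); err != nil {
                    return cache.ErrRequeue{Err: err}
                }
            }
            return nil
        })
        if err == cache.FIFOClosedError {
            return
        }
    }
}

func example() {
    fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil) // nil: no external known-object store
    go drain(fifo, func(d cache.Delta) error {
        fmt.Printf("%s: %v\n", d.Type, d.Object)
        return nil
    })
    // Producer side: fifo.Add(obj), fifo.Update(obj), fifo.Delete(obj), and
    // finally fifo.Close() to let drain return.
}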
+type Deltas []Delta + +// Oldest is a convenience function that returns the oldest delta, or +// nil if there are no deltas. +func (d Deltas) Oldest() *Delta { + if len(d) > 0 { + return &d[0] + } + return nil +} + +// Newest is a convenience function that returns the newest delta, or +// nil if there are no deltas. +func (d Deltas) Newest() *Delta { + if n := len(d); n > 0 { + return &d[n-1] + } + return nil +} + +// copyDeltas returns a shallow copy of d; that is, it copies the slice but not +// the objects in the slice. This allows Get/List to return an object that we +// know won't be clobbered by a subsequent modifications. +func copyDeltas(d Deltas) Deltas { + d2 := make(Deltas, len(d)) + copy(d2, d) + return d2 +} + +// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where +// an object was deleted but the watch deletion event was missed. In this +// case we don't know the final "resting" state of the object, so there's +// a chance the included `Obj` is stale. +type DeletedFinalStateUnknown struct { + Key string + Obj interface{} +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/doc.go b/vendor/k8s.io/client-go/tools/cache/doc.go similarity index 53% rename from vendor/k8s.io/kubernetes/pkg/api/doc.go rename to vendor/k8s.io/client-go/tools/cache/doc.go index 283a83e40..56b61d300 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/doc.go +++ b/vendor/k8s.io/client-go/tools/cache/doc.go @@ -14,11 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register - -// Package api contains the latest (or "internal") version of the -// Kubernetes API objects. This is the API objects as represented in memory. -// The contract presented to clients is located in the versioned packages, -// which are sub-directories. The first one is "v1". Those packages -// describe how a particular version is serialized to storage/network. -package api // import "k8s.io/kubernetes/pkg/api" +// Package cache is a client-side caching mechanism. It is useful for +// reducing the number of server calls you'd otherwise need to make. +// Reflector watches a server and updates a Store. Two stores are provided; +// one that simply caches objects (for example, to allow a scheduler to +// list currently available nodes), and one that additionally acts as +// a FIFO queue (for example, to allow a scheduler to process incoming +// pods). +package cache // import "k8s.io/client-go/tools/cache" diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go new file mode 100644 index 000000000..fa88fc407 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -0,0 +1,208 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "sync" + "time" + + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/util/clock" +) + +// ExpirationCache implements the store interface +// 1. 
All entries are automatically time stamped on insert +// a. The key is computed based off the original item/keyFunc +// b. The value inserted under that key is the timestamped item +// 2. Expiration happens lazily on read based on the expiration policy +// a. No item can be inserted into the store while we're expiring +// *any* item in the cache. +// 3. Time-stamps are stripped off unexpired entries before return +// Note that the ExpirationCache is inherently slower than a normal +// threadSafeStore because it takes a write lock every time it checks if +// an item has expired. +type ExpirationCache struct { + cacheStorage ThreadSafeStore + keyFunc KeyFunc + clock clock.Clock + expirationPolicy ExpirationPolicy + // expirationLock is a write lock used to guarantee that we don't clobber + // newly inserted objects because of a stale expiration timestamp comparison + expirationLock sync.Mutex +} + +// ExpirationPolicy dictates when an object expires. Currently only abstracted out +// so unittests don't rely on the system clock. +type ExpirationPolicy interface { + IsExpired(obj *timestampedEntry) bool +} + +// TTLPolicy implements a ttl based ExpirationPolicy. +type TTLPolicy struct { + // >0: Expire entries with an age > ttl + // <=0: Don't expire any entry + Ttl time.Duration + + // Clock used to calculate ttl expiration + Clock clock.Clock +} + +// IsExpired returns true if the given object is older than the ttl, or it can't +// determine its age. +func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool { + return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl +} + +// timestampedEntry is the only type allowed in a ExpirationCache. +type timestampedEntry struct { + obj interface{} + timestamp time.Time +} + +// getTimestampedEntry returns the timestampedEntry stored under the given key. +func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) { + item, _ := c.cacheStorage.Get(key) + if tsEntry, ok := item.(*timestampedEntry); ok { + return tsEntry, true + } + return nil, false +} + +// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't +// already expired. It holds a write lock across deletion. +func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { + // Prevent all inserts from the time we deem an item as "expired" to when we + // delete it, so an un-expired item doesn't sneak in under the same key, just + // before the Delete. + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + timestampedItem, exists := c.getTimestampedEntry(key) + if !exists { + return nil, false + } + if c.expirationPolicy.IsExpired(timestampedItem) { + glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj) + c.cacheStorage.Delete(key) + return nil, false + } + return timestampedItem.obj, true +} + +// GetByKey returns the item stored under the key, or sets exists=false. +func (c *ExpirationCache) GetByKey(key string) (interface{}, bool, error) { + obj, exists := c.getOrExpire(key) + return obj, exists, nil +} + +// Get returns unexpired items. It purges the cache of expired items in the +// process. +func (c *ExpirationCache) Get(obj interface{}) (interface{}, bool, error) { + key, err := c.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + obj, exists := c.getOrExpire(key) + return obj, exists, nil +} + +// List retrieves a list of unexpired items. It purges the cache of expired +// items in the process. 
+func (c *ExpirationCache) List() []interface{} { + items := c.cacheStorage.List() + + list := make([]interface{}, 0, len(items)) + for _, item := range items { + obj := item.(*timestampedEntry).obj + if key, err := c.keyFunc(obj); err != nil { + list = append(list, obj) + } else if obj, exists := c.getOrExpire(key); exists { + list = append(list, obj) + } + } + return list +} + +// ListKeys returns a list of all keys in the expiration cache. +func (c *ExpirationCache) ListKeys() []string { + return c.cacheStorage.ListKeys() +} + +// Add timestamps an item and inserts it into the cache, overwriting entries +// that might exist under the same key. +func (c *ExpirationCache) Add(obj interface{}) error { + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Add(key, ×tampedEntry{obj, c.clock.Now()}) + return nil +} + +// Update has not been implemented yet for lack of a use case, so this method +// simply calls `Add`. This effectively refreshes the timestamp. +func (c *ExpirationCache) Update(obj interface{}) error { + return c.Add(obj) +} + +// Delete removes an item from the cache. +func (c *ExpirationCache) Delete(obj interface{}) error { + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Delete(key) + return nil +} + +// Replace will convert all items in the given list to TimestampedEntries +// before attempting the replace operation. The replace operation will +// delete the contents of the ExpirationCache `c`. +func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error { + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + items := map[string]interface{}{} + ts := c.clock.Now() + for _, item := range list { + key, err := c.keyFunc(item) + if err != nil { + return KeyError{item, err} + } + items[key] = ×tampedEntry{item, ts} + } + c.cacheStorage.Replace(items, resourceVersion) + return nil +} + +// Resync will touch all objects to put them into the processing queue +func (c *ExpirationCache) Resync() error { + return c.cacheStorage.Resync() +} + +// NewTTLStore creates and returns a ExpirationCache with a TTLPolicy +func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store { + return &ExpirationCache{ + cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), + keyFunc: keyFunc, + clock: clock.RealClock{}, + expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}}, + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go new file mode 100644 index 000000000..a096765f6 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go @@ -0,0 +1,54 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
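For illustration only (not part of the vendored file): NewTTLStore above combines the ExpirationCache with a TTLPolicy, so reads lazily purge anything older than the ttl while Add (and Update, which delegates to it) refreshes the timestamp. A small sketch; the one-minute TTL and the "default/example" key are made up for the example:

package example

import (
    "time"

    "k8s.io/client-go/tools/cache"
)

// newRecentlySeen returns a Store whose entries silently disappear from
// Get/List/GetByKey once they are more than a minute old.
func newRecentlySeen() cache.Store {
    return cache.NewTTLStore(cache.MetaNamespaceKeyFunc, time.Minute)
}

func example(store cache.Store, obj interface{}) {
    _ = store.Add(obj) // the entry is timestamped on insert
    if item, exists, _ := store.GetByKey("default/example"); exists {
        _ = item // still within the TTL
    }
    // Re-adding (or Update-ing) an object refreshes its timestamp.
}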
+*/ + +package cache + +import ( + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/sets" +) + +type fakeThreadSafeMap struct { + ThreadSafeStore + deletedKeys chan<- string +} + +func (c *fakeThreadSafeMap) Delete(key string) { + if c.deletedKeys != nil { + c.ThreadSafeStore.Delete(key) + c.deletedKeys <- key + } +} + +type FakeExpirationPolicy struct { + NeverExpire sets.String + RetrieveKeyFunc KeyFunc +} + +func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool { + key, _ := p.RetrieveKeyFunc(obj) + return !p.NeverExpire.Has(key) +} + +func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store { + cacheStorage := NewThreadSafeStore(Indexers{}, Indices{}) + return &ExpirationCache{ + cacheStorage: &fakeThreadSafeMap{cacheStorage, deletedKeys}, + keyFunc: keyFunc, + clock: cacheClock, + expirationPolicy: expirationPolicy, + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go new file mode 100644 index 000000000..8d71c2474 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go @@ -0,0 +1,102 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +// FakeStore lets you define custom functions for store operations +type FakeCustomStore struct { + AddFunc func(obj interface{}) error + UpdateFunc func(obj interface{}) error + DeleteFunc func(obj interface{}) error + ListFunc func() []interface{} + ListKeysFunc func() []string + GetFunc func(obj interface{}) (item interface{}, exists bool, err error) + GetByKeyFunc func(key string) (item interface{}, exists bool, err error) + ReplaceFunc func(list []interface{}, resourceVerion string) error + ResyncFunc func() error +} + +// Add calls the custom Add function if defined +func (f *FakeCustomStore) Add(obj interface{}) error { + if f.AddFunc != nil { + return f.AddFunc(obj) + } + return nil +} + +// Update calls the custom Update function if defined +func (f *FakeCustomStore) Update(obj interface{}) error { + if f.UpdateFunc != nil { + return f.Update(obj) + } + return nil +} + +// Delete calls the custom Delete function if defined +func (f *FakeCustomStore) Delete(obj interface{}) error { + if f.DeleteFunc != nil { + return f.DeleteFunc(obj) + } + return nil +} + +// List calls the custom List function if defined +func (f *FakeCustomStore) List() []interface{} { + if f.ListFunc != nil { + return f.ListFunc() + } + return nil +} + +// ListKeys calls the custom ListKeys function if defined +func (f *FakeCustomStore) ListKeys() []string { + if f.ListKeysFunc != nil { + return f.ListKeysFunc() + } + return nil +} + +// Get calls the custom Get function if defined +func (f *FakeCustomStore) Get(obj interface{}) (item interface{}, exists bool, err error) { + if f.GetFunc != nil { + return f.GetFunc(obj) + } + return nil, false, nil +} + +// GetByKey calls the custom GetByKey function if defined +func (f *FakeCustomStore) GetByKey(key string) (item interface{}, exists bool, err error) { + if f.GetByKeyFunc != nil { + return f.GetByKeyFunc(key) + } + return nil, false, nil +} + +// Replace calls the custom Replace function if defined +func (f *FakeCustomStore) Replace(list []interface{}, resourceVersion string) error { + if f.ReplaceFunc != nil { + return f.ReplaceFunc(list, resourceVersion) + } + return nil +} + +// Resync calls the custom Resync function if defined +func (f *FakeCustomStore) Resync() error { + if f.ResyncFunc != nil { + return f.ResyncFunc() + } + return nil +} diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go new file mode 100644 index 000000000..e05c01ee2 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -0,0 +1,358 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "sync" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// PopProcessFunc is passed to Pop() method of Queue interface. +// It is supposed to process the element popped from the queue. +type PopProcessFunc func(interface{}) error + +// ErrRequeue may be returned by a PopProcessFunc to safely requeue +// the current item. 
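For illustration only (not part of the vendored file): FakeCustomStore above lets tests stub exactly the store operations they care about, with nil funcs falling back to no-ops. One caveat visible in the hunk: the vendored Update method calls f.Update rather than f.UpdateFunc, so setting UpdateFunc would recurse; the sketch below therefore stubs only Add:

package example

import "k8s.io/client-go/tools/cache"

// newRecordingStore returns a Store that captures every Add into the provided
// slice and treats all other operations as no-ops.
func newRecordingStore(added *[]interface{}) cache.Store {
    return &cache.FakeCustomStore{
        AddFunc: func(obj interface{}) error {
            *added = append(*added, obj)
            return nil
        },
    }
}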
The value of Err will be returned from Pop. +type ErrRequeue struct { + // Err is returned by the Pop function + Err error +} + +var FIFOClosedError error = errors.New("DeltaFIFO: manipulating with closed queue") + +func (e ErrRequeue) Error() string { + if e.Err == nil { + return "the popped item should be requeued without returning an error" + } + return e.Err.Error() +} + +// Queue is exactly like a Store, but has a Pop() method too. +type Queue interface { + Store + + // Pop blocks until it has something to process. + // It returns the object that was process and the result of processing. + // The PopProcessFunc may return an ErrRequeue{...} to indicate the item + // should be requeued before releasing the lock on the queue. + Pop(PopProcessFunc) (interface{}, error) + + // AddIfNotPresent adds a value previously + // returned by Pop back into the queue as long + // as nothing else (presumably more recent) + // has since been added. + AddIfNotPresent(interface{}) error + + // HasSynced returns true if the first batch of items has been popped + HasSynced() bool + + // Close queue + Close() +} + +// Helper function for popping from Queue. +// WARNING: Do NOT use this function in non-test code to avoid races +// unless you really really really really know what you are doing. +func Pop(queue Queue) interface{} { + var result interface{} + queue.Pop(func(obj interface{}) error { + result = obj + return nil + }) + return result +} + +// FIFO receives adds and updates from a Reflector, and puts them in a queue for +// FIFO order processing. If multiple adds/updates of a single item happen while +// an item is in the queue before it has been processed, it will only be +// processed once, and when it is processed, the most recent version will be +// processed. This can't be done with a channel. +// +// FIFO solves this use case: +// * You want to process every object (exactly) once. +// * You want to process the most recent version of the object when you process it. +// * You do not want to process deleted objects, they should be removed from the queue. +// * You do not want to periodically reprocess objects. +// Compare with DeltaFIFO for other use cases. +type FIFO struct { + lock sync.RWMutex + cond sync.Cond + // We depend on the property that items in the set are in the queue and vice versa. + items map[string]interface{} + queue []string + + // populated is true if the first batch of items inserted by Replace() has been populated + // or Delete/Add/Update was called first. + populated bool + // initialPopulationCount is the number of items inserted by the first call of Replace() + initialPopulationCount int + + // keyFunc is used to make the key used for queued item insertion and retrieval, and + // should be deterministic. + keyFunc KeyFunc + + // Indication the queue is closed. + // Used to indicate a queue is closed so a control loop can exit when a queue is empty. + // Currently, not used to gate any of CRED operations. + closed bool + closedLock sync.Mutex +} + +var ( + _ = Queue(&FIFO{}) // FIFO is a Queue +) + +// Close the queue. 
+func (f *FIFO) Close() { + f.closedLock.Lock() + defer f.closedLock.Unlock() + f.closed = true + f.cond.Broadcast() +} + +// Return true if an Add/Update/Delete/AddIfNotPresent are called first, +// or an Update called first but the first batch of items inserted by Replace() has been popped +func (f *FIFO) HasSynced() bool { + f.lock.Lock() + defer f.lock.Unlock() + return f.populated && f.initialPopulationCount == 0 +} + +// Add inserts an item, and puts it in the queue. The item is only enqueued +// if it doesn't already exist in the set. +func (f *FIFO) Add(obj interface{}) error { + id, err := f.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + if _, exists := f.items[id]; !exists { + f.queue = append(f.queue, id) + } + f.items[id] = obj + f.cond.Broadcast() + return nil +} + +// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already +// present in the set, it is neither enqueued nor added to the set. +// +// This is useful in a single producer/consumer scenario so that the consumer can +// safely retry items without contending with the producer and potentially enqueueing +// stale items. +func (f *FIFO) AddIfNotPresent(obj interface{}) error { + id, err := f.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.addIfNotPresent(id, obj) + return nil +} + +// addIfNotPresent assumes the fifo lock is already held and adds the provided +// item to the queue under id if it does not already exist. +func (f *FIFO) addIfNotPresent(id string, obj interface{}) { + f.populated = true + if _, exists := f.items[id]; exists { + return + } + + f.queue = append(f.queue, id) + f.items[id] = obj + f.cond.Broadcast() +} + +// Update is the same as Add in this implementation. +func (f *FIFO) Update(obj interface{}) error { + return f.Add(obj) +} + +// Delete removes an item. It doesn't add it to the queue, because +// this implementation assumes the consumer only cares about the objects, +// not the order in which they were created/added. +func (f *FIFO) Delete(obj interface{}) error { + id, err := f.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + delete(f.items, id) + return err +} + +// List returns a list of all the items. +func (f *FIFO) List() []interface{} { + f.lock.RLock() + defer f.lock.RUnlock() + list := make([]interface{}, 0, len(f.items)) + for _, item := range f.items { + list = append(list, item) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently +// in the FIFO. +func (f *FIFO) ListKeys() []string { + f.lock.RLock() + defer f.lock.RUnlock() + list := make([]string, 0, len(f.items)) + for key := range f.items { + list = append(list, key) + } + return list +} + +// Get returns the requested item, or sets exists=false. +func (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) { + key, err := f.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return f.GetByKey(key) +} + +// GetByKey returns the requested item, or sets exists=false. 
+func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) { + f.lock.RLock() + defer f.lock.RUnlock() + item, exists = f.items[key] + return item, exists, nil +} + +// Checks if the queue is closed +func (f *FIFO) IsClosed() bool { + f.closedLock.Lock() + defer f.closedLock.Unlock() + if f.closed { + return true + } + return false +} + +// Pop waits until an item is ready and processes it. If multiple items are +// ready, they are returned in the order in which they were added/updated. +// The item is removed from the queue (and the store) before it is processed, +// so if you don't successfully process it, it should be added back with +// AddIfNotPresent(). process function is called under lock, so it is safe +// update data structures in it that need to be in sync with the queue. +func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { + f.lock.Lock() + defer f.lock.Unlock() + for { + for len(f.queue) == 0 { + // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. + // When Close() is called, the f.closed is set and the condition is broadcasted. + // Which causes this loop to continue and return from the Pop(). + if f.IsClosed() { + return nil, FIFOClosedError + } + + f.cond.Wait() + } + id := f.queue[0] + f.queue = f.queue[1:] + if f.initialPopulationCount > 0 { + f.initialPopulationCount-- + } + item, ok := f.items[id] + if !ok { + // Item may have been deleted subsequently. + continue + } + delete(f.items, id) + err := process(item) + if e, ok := err.(ErrRequeue); ok { + f.addIfNotPresent(id, item) + err = e.Err + } + return item, err + } +} + +// Replace will delete the contents of 'f', using instead the given map. +// 'f' takes ownership of the map, you should not reference the map again +// after calling this function. f's queue is reset, too; upon return, it +// will contain the items in the map, in no particular order. +func (f *FIFO) Replace(list []interface{}, resourceVersion string) error { + items := map[string]interface{}{} + for _, item := range list { + key, err := f.keyFunc(item) + if err != nil { + return KeyError{item, err} + } + items[key] = item + } + + f.lock.Lock() + defer f.lock.Unlock() + + if !f.populated { + f.populated = true + f.initialPopulationCount = len(items) + } + + f.items = items + f.queue = f.queue[:0] + for id := range items { + f.queue = append(f.queue, id) + } + if len(f.queue) > 0 { + f.cond.Broadcast() + } + return nil +} + +// Resync will touch all objects to put them into the processing queue +func (f *FIFO) Resync() error { + f.lock.Lock() + defer f.lock.Unlock() + + inQueue := sets.NewString() + for _, id := range f.queue { + inQueue.Insert(id) + } + for id := range f.items { + if !inQueue.Has(id) { + f.queue = append(f.queue, id) + } + } + if len(f.queue) > 0 { + f.cond.Broadcast() + } + return nil +} + +// NewFIFO returns a Store which can be used to queue up items to +// process. +func NewFIFO(keyFunc KeyFunc) *FIFO { + f := &FIFO{ + items: map[string]interface{}{}, + queue: []string{}, + keyFunc: keyFunc, + } + f.cond.L = &f.lock + return f +} diff --git a/vendor/k8s.io/client-go/tools/cache/heap.go b/vendor/k8s.io/client-go/tools/cache/heap.go new file mode 100644 index 000000000..78e492455 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/heap.go @@ -0,0 +1,323 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
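For illustration only (not part of the vendored file): the plain FIFO above collapses repeated adds/updates of the same key into a single queue entry holding the newest object, giving the "process the latest version exactly once" behaviour its comment describes. A consumer sketch mirroring the DeltaFIFO one earlier:

package example

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

func runFIFO(stop <-chan struct{}) {
    fifo := cache.NewFIFO(cache.MetaNamespaceKeyFunc)

    go func() {
        for {
            if _, err := fifo.Pop(func(obj interface{}) error {
                fmt.Println("processing newest version:", obj)
                return nil
            }); err == cache.FIFOClosedError {
                return
            }
        }
    }()

    // Producer side: fifo.Add / fifo.Update as objects arrive.
    <-stop
    fifo.Close() // wakes the blocked Pop and lets the goroutine exit
}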
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file implements a heap data structure. + +package cache + +import ( + "container/heap" + "fmt" + "sync" +) + +const ( + closedMsg = "heap is closed" +) + +type LessFunc func(interface{}, interface{}) bool +type heapItem struct { + obj interface{} // The object which is stored in the heap. + index int // The index of the object's key in the Heap.queue. +} + +type itemKeyValue struct { + key string + obj interface{} +} + +// heapData is an internal struct that implements the standard heap interface +// and keeps the data stored in the heap. +type heapData struct { + // items is a map from key of the objects to the objects and their index. + // We depend on the property that items in the map are in the queue and vice versa. + items map[string]*heapItem + // queue implements a heap data structure and keeps the order of elements + // according to the heap invariant. The queue keeps the keys of objects stored + // in "items". + queue []string + + // keyFunc is used to make the key used for queued item insertion and retrieval, and + // should be deterministic. + keyFunc KeyFunc + // lessFunc is used to compare two objects in the heap. + lessFunc LessFunc +} + +var ( + _ = heap.Interface(&heapData{}) // heapData is a standard heap +) + +// Less compares two objects and returns true if the first one should go +// in front of the second one in the heap. +func (h *heapData) Less(i, j int) bool { + if i > len(h.queue) || j > len(h.queue) { + return false + } + itemi, ok := h.items[h.queue[i]] + if !ok { + return false + } + itemj, ok := h.items[h.queue[j]] + if !ok { + return false + } + return h.lessFunc(itemi.obj, itemj.obj) +} + +// Len returns the number of items in the Heap. +func (h *heapData) Len() int { return len(h.queue) } + +// Swap implements swapping of two elements in the heap. This is a part of standard +// heap interface and should never be called directly. +func (h *heapData) Swap(i, j int) { + h.queue[i], h.queue[j] = h.queue[j], h.queue[i] + item := h.items[h.queue[i]] + item.index = i + item = h.items[h.queue[j]] + item.index = j +} + +// Push is supposed to be called by heap.Push only. +func (h *heapData) Push(kv interface{}) { + keyValue := kv.(*itemKeyValue) + n := len(h.queue) + h.items[keyValue.key] = &heapItem{keyValue.obj, n} + h.queue = append(h.queue, keyValue.key) +} + +// Pop is supposed to be called by heap.Pop only. +func (h *heapData) Pop() interface{} { + key := h.queue[len(h.queue)-1] + h.queue = h.queue[0 : len(h.queue)-1] + item, ok := h.items[key] + if !ok { + // This is an error + return nil + } + delete(h.items, key) + return item.obj +} + +// Heap is a thread-safe producer/consumer queue that implements a heap data structure. +// It can be used to implement priority queues and similar data structures. +type Heap struct { + lock sync.RWMutex + cond sync.Cond + + // data stores objects and has a queue that keeps their ordering according + // to the heap invariant. + data *heapData + + // closed indicates that the queue is closed. + // It is mainly used to let Pop() exit its control loop while waiting for an item. 
+ closed bool +} + +// Close the Heap and signals condition variables that may be waiting to pop +// items from the heap. +func (h *Heap) Close() { + h.lock.Lock() + defer h.lock.Unlock() + h.closed = true + h.cond.Broadcast() +} + +// Add inserts an item, and puts it in the queue. The item is updated if it +// already exists. +func (h *Heap) Add(obj interface{}) error { + key, err := h.data.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + h.lock.Lock() + defer h.lock.Unlock() + if h.closed { + return fmt.Errorf(closedMsg) + } + if _, exists := h.data.items[key]; exists { + h.data.items[key].obj = obj + heap.Fix(h.data, h.data.items[key].index) + } else { + h.addIfNotPresentLocked(key, obj) + } + h.cond.Broadcast() + return nil +} + +// Adds all the items in the list to the queue and then signals the condition +// variable. It is useful when the caller would like to add all of the items +// to the queue before consumer starts processing them. +func (h *Heap) BulkAdd(list []interface{}) error { + h.lock.Lock() + defer h.lock.Unlock() + if h.closed { + return fmt.Errorf(closedMsg) + } + for _, obj := range list { + key, err := h.data.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + if _, exists := h.data.items[key]; exists { + h.data.items[key].obj = obj + heap.Fix(h.data, h.data.items[key].index) + } else { + h.addIfNotPresentLocked(key, obj) + } + } + h.cond.Broadcast() + return nil +} + +// AddIfNotPresent inserts an item, and puts it in the queue. If an item with +// the key is present in the map, no changes is made to the item. +// +// This is useful in a single producer/consumer scenario so that the consumer can +// safely retry items without contending with the producer and potentially enqueueing +// stale items. +func (h *Heap) AddIfNotPresent(obj interface{}) error { + id, err := h.data.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + h.lock.Lock() + defer h.lock.Unlock() + if h.closed { + return fmt.Errorf(closedMsg) + } + h.addIfNotPresentLocked(id, obj) + h.cond.Broadcast() + return nil +} + +// addIfNotPresentLocked assumes the lock is already held and adds the the provided +// item to the queue if it does not already exist. +func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) { + if _, exists := h.data.items[key]; exists { + return + } + heap.Push(h.data, &itemKeyValue{key, obj}) +} + +// Update is the same as Add in this implementation. When the item does not +// exist, it is added. +func (h *Heap) Update(obj interface{}) error { + return h.Add(obj) +} + +// Delete removes an item. +func (h *Heap) Delete(obj interface{}) error { + key, err := h.data.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + h.lock.Lock() + defer h.lock.Unlock() + if item, ok := h.data.items[key]; ok { + heap.Remove(h.data, item.index) + return nil + } + return fmt.Errorf("object not found") +} + +// Pop waits until an item is ready. If multiple items are +// ready, they are returned in the order given by Heap.data.lessFunc. +func (h *Heap) Pop() (interface{}, error) { + h.lock.Lock() + defer h.lock.Unlock() + for len(h.data.queue) == 0 { + // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. + // When Close() is called, the h.closed is set and the condition is broadcast, + // which causes this loop to continue and return from the Pop(). 
+ if h.closed { + return nil, fmt.Errorf("heap is closed") + } + h.cond.Wait() + } + obj := heap.Pop(h.data) + if obj != nil { + return obj, nil + } else { + return nil, fmt.Errorf("object was removed from heap data") + } +} + +// List returns a list of all the items. +func (h *Heap) List() []interface{} { + h.lock.RLock() + defer h.lock.RUnlock() + list := make([]interface{}, 0, len(h.data.items)) + for _, item := range h.data.items { + list = append(list, item.obj) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently in the Heap. +func (h *Heap) ListKeys() []string { + h.lock.RLock() + defer h.lock.RUnlock() + list := make([]string, 0, len(h.data.items)) + for key := range h.data.items { + list = append(list, key) + } + return list +} + +// Get returns the requested item, or sets exists=false. +func (h *Heap) Get(obj interface{}) (interface{}, bool, error) { + key, err := h.data.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return h.GetByKey(key) +} + +// GetByKey returns the requested item, or sets exists=false. +func (h *Heap) GetByKey(key string) (interface{}, bool, error) { + h.lock.RLock() + defer h.lock.RUnlock() + item, exists := h.data.items[key] + if !exists { + return nil, false, nil + } + return item.obj, true, nil +} + +// IsClosed returns true if the queue is closed. +func (h *Heap) IsClosed() bool { + h.lock.RLock() + defer h.lock.RUnlock() + if h.closed { + return true + } + return false +} + +// NewHeap returns a Heap which can be used to queue up items to process. +func NewHeap(keyFn KeyFunc, lessFn LessFunc) *Heap { + h := &Heap{ + data: &heapData{ + items: map[string]*heapItem{}, + queue: []string{}, + keyFunc: keyFn, + lessFunc: lessFn, + }, + } + h.cond.L = &h.lock + return h +} diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go new file mode 100644 index 000000000..15acb168e --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -0,0 +1,87 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Indexer is a storage interface that lets you list objects using multiple indexing functions +type Indexer interface { + Store + // Retrieve list of objects that match on the named indexing function + Index(indexName string, obj interface{}) ([]interface{}, error) + // IndexKeys returns the set of keys that match on the named indexing function. + IndexKeys(indexName, indexKey string) ([]string, error) + // ListIndexFuncValues returns the list of generated values of an Index func + ListIndexFuncValues(indexName string) []string + // ByIndex lists object that match on the named indexing function with the exact key + ByIndex(indexName, indexKey string) ([]interface{}, error) + // GetIndexer return the indexers + GetIndexers() Indexers + + // AddIndexers adds more indexers to this store. 
If you call this after you already have data
+	// in the store, the results are undefined.
+	AddIndexers(newIndexers Indexers) error
+}
+
+// IndexFunc knows how to provide an indexed value for an object.
+type IndexFunc func(obj interface{}) ([]string, error)
+
+// IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns
+// unique values for every object. This conversion can create errors when more than one key is found. You
+// should prefer to make proper key and index functions.
+func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
+	return func(obj interface{}) (string, error) {
+		indexKeys, err := indexFunc(obj)
+		if err != nil {
+			return "", err
+		}
+		if len(indexKeys) > 1 {
+			return "", fmt.Errorf("too many keys: %v", indexKeys)
+		}
+		if len(indexKeys) == 0 {
+			return "", fmt.Errorf("unexpected empty indexKeys")
+		}
+		return indexKeys[0], nil
+	}
+}
+
+const (
+	NamespaceIndex string = "namespace"
+)
+
+// MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace
+func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) {
+	meta, err := meta.Accessor(obj)
+	if err != nil {
+		return []string{""}, fmt.Errorf("object has no meta: %v", err)
+	}
+	return []string{meta.GetNamespace()}, nil
+}
+
+// Index maps the indexed value to a set of keys in the store that match on that value
+type Index map[string]sets.String
+
+// Indexers maps a name to an IndexFunc
+type Indexers map[string]IndexFunc
+
+// Indices maps a name to an Index
+type Indices map[string]Index
diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go
new file mode 100644
index 000000000..27d51a6b3
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/cache/listers.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
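// A small sketch (not part of the vendored file) of the Indexer machinery above:
// a store indexed by namespace through MetaNamespaceIndexFunc and queried with
// ByIndex. The pod objects are placeholders invented for the illustration.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})
	indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"}})
	indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "db", Namespace: "prod"}})

	pods, err := indexer.ByIndex(cache.NamespaceIndex, "default")
	fmt.Println(len(pods), err) // only the pod in "default" matches
}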
+*/ + +package cache + +import ( + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// AppendFunc is used to add a matching item to whatever list the caller is using +type AppendFunc func(interface{}) + +func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { + for _, m := range store.List() { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + } + return nil +} + +func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { + if namespace == metav1.NamespaceAll { + for _, m := range indexer.List() { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + } + return nil + } + + items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace}) + if err != nil { + // Ignore error; do slow search without index. + glog.Warningf("can not retrieve list of objects using index : %v", err) + for _, m := range indexer.List() { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if metadata.GetNamespace() == namespace && selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + + } + return nil + } + for _, m := range items { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + } + + return nil +} + +// GenericLister is a lister skin on a generic Indexer +type GenericLister interface { + // List will return all objects across namespaces + List(selector labels.Selector) (ret []runtime.Object, err error) + // Get will attempt to retrieve assuming that name==key + Get(name string) (runtime.Object, error) + // ByNamespace will give you a GenericNamespaceLister for one namespace + ByNamespace(namespace string) GenericNamespaceLister +} + +// GenericNamespaceLister is a lister skin on a generic Indexer +type GenericNamespaceLister interface { + // List will return all objects in this namespace + List(selector labels.Selector) (ret []runtime.Object, err error) + // Get will attempt to retrieve by namespace and name + Get(name string) (runtime.Object, error) +} + +func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister { + return &genericLister{indexer: indexer, resource: resource} +} + +type genericLister struct { + indexer Indexer + resource schema.GroupResource +} + +func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(runtime.Object)) + }) + return ret, err +} + +func (s *genericLister) ByNamespace(namespace string) GenericNamespaceLister { + return &genericNamespaceLister{indexer: s.indexer, namespace: namespace, resource: s.resource} +} + +func (s *genericLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} + +type genericNamespaceLister struct { + indexer Indexer + namespace string + resource schema.GroupResource +} + +func (s 
*genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(runtime.Object)) + }) + return ret, err +} + +func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go new file mode 100644 index 000000000..06657a3b0 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -0,0 +1,188 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "time" + + "golang.org/x/net/context" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/pager" +) + +// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. +type ListerWatcher interface { + // List should return a list type object; the Items field will be extracted, and the + // ResourceVersion field will be used to start the watch in the right place. + List(options metav1.ListOptions) (runtime.Object, error) + // Watch should begin a watch at the specified version. + Watch(options metav1.ListOptions) (watch.Interface, error) +} + +// ListFunc knows how to list resources +type ListFunc func(options metav1.ListOptions) (runtime.Object, error) + +// WatchFunc knows how to watch resources +type WatchFunc func(options metav1.ListOptions) (watch.Interface, error) + +// ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface. +// It is a convenience function for users of NewReflector, etc. +// ListFunc and WatchFunc must not be nil +type ListWatch struct { + ListFunc ListFunc + WatchFunc WatchFunc + // DisableChunking requests no chunking for this list watcher. + DisableChunking bool +} + +// Getter interface knows how to access Get method from RESTClient. +type Getter interface { + Get() *restclient.Request +} + +// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector. +func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch { + optionsModifier := func(options *metav1.ListOptions) { + options.FieldSelector = fieldSelector.String() + } + return NewFilteredListWatchFromClient(c, resource, namespace, optionsModifier) +} + +// NewFilteredListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, and option modifier. 
+// Option modifier is a function takes a ListOptions and modifies the consumed ListOptions. Provide customized modifier function +// to apply modification to ListOptions with a field selector, a label selector, or any other desired options. +func NewFilteredListWatchFromClient(c Getter, resource string, namespace string, optionsModifier func(options *metav1.ListOptions)) *ListWatch { + listFunc := func(options metav1.ListOptions) (runtime.Object, error) { + optionsModifier(&options) + return c.Get(). + Namespace(namespace). + Resource(resource). + VersionedParams(&options, metav1.ParameterCodec). + Do(). + Get() + } + watchFunc := func(options metav1.ListOptions) (watch.Interface, error) { + options.Watch = true + optionsModifier(&options) + return c.Get(). + Namespace(namespace). + Resource(resource). + VersionedParams(&options, metav1.ParameterCodec). + Watch() + } + return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc} +} + +func timeoutFromListOptions(options metav1.ListOptions) time.Duration { + if options.TimeoutSeconds != nil { + return time.Duration(*options.TimeoutSeconds) * time.Second + } + return 0 +} + +// List a set of apiserver resources +func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) { + if !lw.DisableChunking { + return pager.New(pager.SimplePageFunc(lw.ListFunc)).List(context.TODO(), options) + } + return lw.ListFunc(options) +} + +// Watch a set of apiserver resources +func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) { + return lw.WatchFunc(options) +} + +// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout +// if timeout is exceeded without all conditions returning true, or an error if an error occurs. +// TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until. +func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch.ConditionFunc) (*watch.Event, error) { + if len(conditions) == 0 { + return nil, nil + } + + list, err := lw.List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + initialItems, err := meta.ExtractList(list) + if err != nil { + return nil, err + } + + // use the initial items as simulated "adds" + var lastEvent *watch.Event + currIndex := 0 + passedConditions := 0 + for _, condition := range conditions { + // check the next condition against the previous event and short circuit waiting for the next watch + if lastEvent != nil { + done, err := condition(*lastEvent) + if err != nil { + return lastEvent, err + } + if done { + passedConditions = passedConditions + 1 + continue + } + } + + ConditionSucceeded: + for currIndex < len(initialItems) { + lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]} + currIndex++ + + done, err := condition(*lastEvent) + if err != nil { + return lastEvent, err + } + if done { + passedConditions = passedConditions + 1 + break ConditionSucceeded + } + } + } + if passedConditions == len(conditions) { + return lastEvent, nil + } + remainingConditions := conditions[passedConditions:] + + metaObj, err := meta.ListAccessor(list) + if err != nil { + return nil, err + } + currResourceVersion := metaObj.GetResourceVersion() + + watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion}) + if err != nil { + return nil, err + } + + evt, err := watch.Until(timeout, watchInterface, remainingConditions...) 
+ if err == watch.ErrWatchClosed { + // present a consistent error interface to callers + err = wait.ErrWaitTimeout + } + return evt, err +} diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go new file mode 100644 index 000000000..cbb6434eb --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go @@ -0,0 +1,261 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "strconv" + "sync" + "time" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + utilcache "k8s.io/apimachinery/pkg/util/cache" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" +) + +// MutationCache is able to take the result of update operations and stores them in an LRU +// that can be used to provide a more current view of a requested object. It requires interpreting +// resourceVersions for comparisons. +// Implementations must be thread-safe. +// TODO find a way to layer this into an informer/lister +type MutationCache interface { + GetByKey(key string) (interface{}, bool, error) + ByIndex(indexName, indexKey string) ([]interface{}, error) + Mutation(interface{}) +} + +type ResourceVersionComparator interface { + CompareResourceVersion(lhs, rhs runtime.Object) int +} + +// NewIntegerResourceVersionMutationCache returns a MutationCache that understands how to +// deal with objects that have a resource version that: +// +// - is an integer +// - increases when updated +// - is comparable across the same resource in a namespace +// +// Most backends will have these semantics. Indexer may be nil. ttl controls how long an item +// remains in the mutation cache before it is removed. +// +// If includeAdds is true, objects in the mutation cache will be returned even if they don't exist +// in the underlying store. This is only safe if your use of the cache can handle mutation entries +// remaining in the cache for up to ttl when mutations and deletes occur very closely in time. +func NewIntegerResourceVersionMutationCache(backingCache Store, indexer Indexer, ttl time.Duration, includeAdds bool) MutationCache { + return &mutationCache{ + backingCache: backingCache, + indexer: indexer, + mutationCache: utilcache.NewLRUExpireCache(100), + comparator: etcdObjectVersioner{}, + ttl: ttl, + includeAdds: includeAdds, + } +} + +// mutationCache doesn't guarantee that it returns values added via Mutation since they can page out and +// since you can't distinguish between, "didn't observe create" and "was deleted after create", +// if the key is missing from the backing cache, we always return it as missing +type mutationCache struct { + lock sync.Mutex + backingCache Store + indexer Indexer + mutationCache *utilcache.LRUExpireCache + includeAdds bool + ttl time.Duration + + comparator ResourceVersionComparator +} + +// GetByKey is never guaranteed to return back the value set in Mutation. 
It could be paged out, it could +// be older than another copy, the backingCache may be more recent or, you might have written twice into the same key. +// You get a value that was valid at some snapshot of time and will always return the newer of backingCache and mutationCache. +func (c *mutationCache) GetByKey(key string) (interface{}, bool, error) { + c.lock.Lock() + defer c.lock.Unlock() + + obj, exists, err := c.backingCache.GetByKey(key) + if err != nil { + return nil, false, err + } + if !exists { + if !c.includeAdds { + // we can't distinguish between, "didn't observe create" and "was deleted after create", so + // if the key is missing, we always return it as missing + return nil, false, nil + } + obj, exists = c.mutationCache.Get(key) + if !exists { + return nil, false, nil + } + } + objRuntime, ok := obj.(runtime.Object) + if !ok { + return obj, true, nil + } + return c.newerObject(key, objRuntime), true, nil +} + +// ByIndex returns the newer objects that match the provided index and indexer key. +// Will return an error if no indexer was provided. +func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, error) { + c.lock.Lock() + defer c.lock.Unlock() + if c.indexer == nil { + return nil, fmt.Errorf("no indexer has been provided to the mutation cache") + } + keys, err := c.indexer.IndexKeys(name, indexKey) + if err != nil { + return nil, err + } + var items []interface{} + keySet := sets.NewString() + for _, key := range keys { + keySet.Insert(key) + obj, exists, err := c.indexer.GetByKey(key) + if err != nil { + return nil, err + } + if !exists { + continue + } + if objRuntime, ok := obj.(runtime.Object); ok { + items = append(items, c.newerObject(key, objRuntime)) + } else { + items = append(items, obj) + } + } + + if c.includeAdds { + fn := c.indexer.GetIndexers()[name] + // Keys() is returned oldest to newest, so full traversal does not alter the LRU behavior + for _, key := range c.mutationCache.Keys() { + updated, ok := c.mutationCache.Get(key) + if !ok { + continue + } + if keySet.Has(key.(string)) { + continue + } + elements, err := fn(updated) + if err != nil { + glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err) + continue + } + for _, inIndex := range elements { + if inIndex != indexKey { + continue + } + items = append(items, updated) + break + } + } + } + + return items, nil +} + +// newerObject checks the mutation cache for a newer object and returns one if found. If the +// mutated object is older than the backing object, it is removed from the Must be +// called while the lock is held. +func (c *mutationCache) newerObject(key string, backing runtime.Object) runtime.Object { + mutatedObj, exists := c.mutationCache.Get(key) + if !exists { + return backing + } + mutatedObjRuntime, ok := mutatedObj.(runtime.Object) + if !ok { + return backing + } + if c.comparator.CompareResourceVersion(backing, mutatedObjRuntime) >= 0 { + c.mutationCache.Remove(key) + return backing + } + return mutatedObjRuntime +} + +// Mutation adds a change to the cache that can be returned in GetByKey if it is newer than the backingCache +// copy. If you call Mutation twice with the same object on different threads, one will win, but its not defined +// which one. This doesn't affect correctness, since the GetByKey guaranteed of "later of these two caches" is +// preserved, but you may not get the version of the object you want. 
The object you get is only guaranteed to +// "one that was valid at some point in time", not "the one that I want". +func (c *mutationCache) Mutation(obj interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + key, err := DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + // this is a "nice to have", so failures shouldn't do anything weird + utilruntime.HandleError(err) + return + } + + if objRuntime, ok := obj.(runtime.Object); ok { + if mutatedObj, exists := c.mutationCache.Get(key); exists { + if mutatedObjRuntime, ok := mutatedObj.(runtime.Object); ok { + if c.comparator.CompareResourceVersion(objRuntime, mutatedObjRuntime) < 0 { + return + } + } + } + } + c.mutationCache.Add(key, obj, c.ttl) +} + +// etcdObjectVersioner implements versioning and extracting etcd node information +// for objects that have an embedded ObjectMeta or ListMeta field. +type etcdObjectVersioner struct{} + +// ObjectResourceVersion implements Versioner +func (a etcdObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return 0, err + } + version := accessor.GetResourceVersion() + if len(version) == 0 { + return 0, nil + } + return strconv.ParseUint(version, 10, 64) +} + +// CompareResourceVersion compares etcd resource versions. Outside this API they are all strings, +// but etcd resource versions are special, they're actually ints, so we can easily compare them. +func (a etcdObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int { + lhsVersion, err := a.ObjectResourceVersion(lhs) + if err != nil { + // coder error + panic(err) + } + rhsVersion, err := a.ObjectResourceVersion(rhs) + if err != nil { + // coder error + panic(err) + } + + if lhsVersion == rhsVersion { + return 0 + } + if lhsVersion < rhsVersion { + return -1 + } + + return 1 +} diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go new file mode 100644 index 000000000..8e6338a1b --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
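// A brief sketch (not part of the vendored file) of the MutationCache above:
// after writing an update, record the new copy with Mutation so GetByKey can
// serve it before the watch-backed store catches up. The pod objects and
// resource versions are placeholders invented for the illustration.
package mutationsketch

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func example() (interface{}, bool, error) {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	store.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default", ResourceVersion: "1"}})

	// Indexer may be nil when ByIndex is not needed; entries expire after the TTL.
	mc := cache.NewIntegerResourceVersionMutationCache(store, nil, time.Minute, false)

	// Pretend an update we just issued bumped the resource version to "2".
	mc.Mutation(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default", ResourceVersion: "2"}})

	// Returns the newer ("2") copy even though the backing store still has "1".
	return mc.GetByKey("default/web")
}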
+*/ + +package cache + +import ( + "fmt" + "os" + "reflect" + "strconv" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" +) + +var mutationDetectionEnabled = false + +func init() { + mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR")) +} + +type CacheMutationDetector interface { + AddObject(obj interface{}) + Run(stopCh <-chan struct{}) +} + +func NewCacheMutationDetector(name string) CacheMutationDetector { + if !mutationDetectionEnabled { + return dummyMutationDetector{} + } + return &defaultCacheMutationDetector{name: name, period: 1 * time.Second} +} + +type dummyMutationDetector struct{} + +func (dummyMutationDetector) Run(stopCh <-chan struct{}) { +} +func (dummyMutationDetector) AddObject(obj interface{}) { +} + +// defaultCacheMutationDetector gives a way to detect if a cached object has been mutated +// It has a list of cached objects and their copies. I haven't thought of a way +// to see WHO is mutating it, just that it's getting mutated. +type defaultCacheMutationDetector struct { + name string + period time.Duration + + lock sync.Mutex + cachedObjs []cacheObj + + // failureFunc is injectable for unit testing. If you don't have it, the process will panic. + // This panic is intentional, since turning on this detection indicates you want a strong + // failure signal. This failure is effectively a p0 bug and you can't trust process results + // after a mutation anyway. + failureFunc func(message string) +} + +// cacheObj holds the actual object and a copy +type cacheObj struct { + cached interface{} + copied interface{} +} + +func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) { + // we DON'T want protection from panics. If we're running this code, we want to die + for { + d.CompareObjects() + + select { + case <-stopCh: + return + case <-time.After(d.period): + } + } +} + +// AddObject makes a deep copy of the object for later comparison. It only works on runtime.Object +// but that covers the vast majority of our cached objects +func (d *defaultCacheMutationDetector) AddObject(obj interface{}) { + if _, ok := obj.(DeletedFinalStateUnknown); ok { + return + } + if obj, ok := obj.(runtime.Object); ok { + copiedObj := obj.DeepCopyObject() + + d.lock.Lock() + defer d.lock.Unlock() + d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj}) + } +} + +func (d *defaultCacheMutationDetector) CompareObjects() { + d.lock.Lock() + defer d.lock.Unlock() + + altered := false + for i, obj := range d.cachedObjs { + if !reflect.DeepEqual(obj.cached, obj.copied) { + fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectDiff(obj.cached, obj.copied)) + altered = true + } + } + + if altered { + msg := fmt.Sprintf("cache %s modified", d.name) + if d.failureFunc != nil { + d.failureFunc(msg) + return + } + panic(msg) + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go new file mode 100644 index 000000000..054a7373c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -0,0 +1,449 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "fmt" + "io" + "math/rand" + "net" + "net/url" + "reflect" + "regexp" + goruntime "runtime" + "runtime/debug" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/golang/glog" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" +) + +// Reflector watches a specified resource and causes all changes to be reflected in the given store. +type Reflector struct { + // name identifies this reflector. By default it will be a file:line if possible. + name string + // metrics tracks basic metric information about the reflector + metrics *reflectorMetrics + + // The type of object we expect to place in the store. + expectedType reflect.Type + // The destination to sync up with the watch source + store Store + // listerWatcher is used to perform lists and watches. + listerWatcher ListerWatcher + // period controls timing between one watch ending and + // the beginning of the next one. + period time.Duration + resyncPeriod time.Duration + ShouldResync func() bool + // clock allows tests to manipulate time + clock clock.Clock + // lastSyncResourceVersion is the resource version token last + // observed when doing a sync with the underlying store + // it is thread safe, but not synchronized with the underlying store + lastSyncResourceVersion string + // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion + lastSyncResourceVersionMutex sync.RWMutex +} + +var ( + // We try to spread the load on apiserver by setting timeouts for + // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout]. + // However, it can be modified to avoid periodic resync to break the + // TCP connection. + minWatchTimeout = 5 * time.Minute +) + +// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector +// The indexer is configured to key on namespace +func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) { + indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc}) + reflector = NewReflector(lw, expectedType, indexer, resyncPeriod) + return indexer, reflector +} + +// NewReflector creates a new Reflector object which will keep the given store up to +// date with the server's contents for the given resource. Reflector promises to +// only put things in the store that have the type of expectedType, unless expectedType +// is nil. If resyncPeriod is non-zero, then lists will be executed after every +// resyncPeriod, so that you can use reflectors to periodically process everything as +// well as incrementally processing the things that change. 
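// A rough sketch (not part of the vendored file) of wiring a ListWatch to a
// Reflector so that a plain Store tracks the cluster, using the NewReflector
// constructor defined just below. The clientset parameter is an assumption
// (any configured *kubernetes.Clientset would do) and is not created here.
package reflectorsketch

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func runPodReflector(clientset *kubernetes.Clientset, stopCh <-chan struct{}) cache.Store {
	lw := cache.NewListWatchFromClient(
		clientset.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	r := cache.NewReflector(lw, &v1.Pod{}, store, 30*time.Second)
	go r.Run(stopCh) // lists, watches and re-watches until stopCh is closed
	return store
}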
+func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewNamedReflector(getDefaultReflectorName(internalPackages...), lw, expectedType, store, resyncPeriod) +} + +// reflectorDisambiguator is used to disambiguate started reflectors. +// initialized to an unstable value to ensure meaning isn't attributed to the suffix. +var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345) + +// NewNamedReflector same as NewReflector, but with a specified name for logging +func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1) + r := &Reflector{ + name: name, + // we need this to be unique per process (some names are still the same) but obvious who it belongs to + metrics: newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))), + listerWatcher: lw, + store: store, + expectedType: reflect.TypeOf(expectedType), + period: time.Second, + resyncPeriod: resyncPeriod, + clock: &clock.RealClock{}, + } + return r +} + +func makeValidPrometheusMetricLabel(in string) string { + // this isn't perfect, but it removes our common characters + return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in) +} + +// internalPackages are packages that ignored when creating a default reflector name. These packages are in the common +// call chains to NewReflector, so they'd be low entropy names for reflectors +var internalPackages = []string{"client-go/tools/cache/", "/runtime/asm_"} + +// getDefaultReflectorName walks back through the call stack until we find a caller from outside of the ignoredPackages +// it returns back a shortpath/filename:line to aid in identification of this reflector when it starts logging +func getDefaultReflectorName(ignoredPackages ...string) string { + name := "????" + const maxStack = 10 + for i := 1; i < maxStack; i++ { + _, file, line, ok := goruntime.Caller(i) + if !ok { + file, line, ok = extractStackCreator() + if !ok { + break + } + i += maxStack + } + if hasPackage(file, ignoredPackages) { + continue + } + + file = trimPackagePrefix(file) + name = fmt.Sprintf("%s:%d", file, line) + break + } + return name +} + +// hasPackage returns true if the file is in one of the ignored packages. +func hasPackage(file string, ignoredPackages []string) bool { + for _, ignoredPackage := range ignoredPackages { + if strings.Contains(file, ignoredPackage) { + return true + } + } + return false +} + +// trimPackagePrefix reduces duplicate values off the front of a package name. +func trimPackagePrefix(file string) string { + if l := strings.LastIndex(file, "k8s.io/client-go/pkg/"); l >= 0 { + return file[l+len("k8s.io/client-go/"):] + } + if l := strings.LastIndex(file, "/src/"); l >= 0 { + return file[l+5:] + } + if l := strings.LastIndex(file, "/pkg/"); l >= 0 { + return file[l+1:] + } + return file +} + +var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`) + +// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false +// if the creator cannot be located. 
+// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440 +func extractStackCreator() (string, int, bool) { + stack := debug.Stack() + matches := stackCreator.FindStringSubmatch(string(stack)) + if matches == nil || len(matches) != 4 { + return "", 0, false + } + line, err := strconv.Atoi(matches[3]) + if err != nil { + return "", 0, false + } + return matches[2], line, true +} + +// Run starts a watch and handles watch events. Will restart the watch if it is closed. +// Run will exit when stopCh is closed. +func (r *Reflector) Run(stopCh <-chan struct{}) { + glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) + wait.Until(func() { + if err := r.ListAndWatch(stopCh); err != nil { + utilruntime.HandleError(err) + } + }, r.period, stopCh) +} + +var ( + // nothing will ever be sent down this channel + neverExitWatch <-chan time.Time = make(chan time.Time) + + // Used to indicate that watching stopped so that a resync could happen. + errorResyncRequested = errors.New("resync channel fired") + + // Used to indicate that watching stopped because of a signal from the stop + // channel passed in from a client of the reflector. + errorStopRequested = errors.New("Stop requested") +) + +// resyncChan returns a channel which will receive something when a resync is +// required, and a cleanup function. +func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { + if r.resyncPeriod == 0 { + return neverExitWatch, func() bool { return false } + } + // The cleanup function is required: imagine the scenario where watches + // always fail so we end up listing frequently. Then, if we don't + // manually stop the timer, we could end up with many timers active + // concurrently. + t := r.clock.NewTimer(r.resyncPeriod) + return t.C(), t.Stop +} + +// ListAndWatch first lists all items and get the resource version at the moment of call, +// and then use the resource version to watch. +// It returns error if ListAndWatch didn't even try to initialize watch. +func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { + glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) + var resourceVersion string + + // Explicitly set "0" as resource version - it's fine for the List() + // to be served from cache and potentially be delayed relative to + // etcd contents. Reflector framework will catch up via Watch() eventually. 
+ options := metav1.ListOptions{ResourceVersion: "0"} + r.metrics.numberOfLists.Inc() + start := r.clock.Now() + list, err := r.listerWatcher.List(options) + if err != nil { + return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) + } + r.metrics.listDuration.Observe(time.Since(start).Seconds()) + listMetaInterface, err := meta.ListAccessor(list) + if err != nil { + return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err) + } + resourceVersion = listMetaInterface.GetResourceVersion() + items, err := meta.ExtractList(list) + if err != nil { + return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err) + } + r.metrics.numberOfItemsInList.Observe(float64(len(items))) + if err := r.syncWith(items, resourceVersion); err != nil { + return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err) + } + r.setLastSyncResourceVersion(resourceVersion) + + resyncerrc := make(chan error, 1) + cancelCh := make(chan struct{}) + defer close(cancelCh) + go func() { + resyncCh, cleanup := r.resyncChan() + defer func() { + cleanup() // Call the last one written into cleanup + }() + for { + select { + case <-resyncCh: + case <-stopCh: + return + case <-cancelCh: + return + } + if r.ShouldResync == nil || r.ShouldResync() { + glog.V(4).Infof("%s: forcing resync", r.name) + if err := r.store.Resync(); err != nil { + resyncerrc <- err + return + } + } + cleanup() + resyncCh, cleanup = r.resyncChan() + } + }() + + for { + // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors + select { + case <-stopCh: + return nil + default: + } + + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options = metav1.ListOptions{ + ResourceVersion: resourceVersion, + // We want to avoid situations of hanging watchers. Stop any wachers that do not + // receive any events within the timeout window. + TimeoutSeconds: &timeoutSeconds, + } + + r.metrics.numberOfWatches.Inc() + w, err := r.listerWatcher.Watch(options) + if err != nil { + switch err { + case io.EOF: + // watch closed normally + case io.ErrUnexpectedEOF: + glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err) + default: + utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err)) + } + // If this is "connection refused" error, it means that most likely apiserver is not responsive. + // It doesn't make sense to re-list all objects because most likely we will be able to restart + // watch where we ended. + // If that's the case wait and resend watch request. + if urlError, ok := err.(*url.Error); ok { + if opError, ok := urlError.Err.(*net.OpError); ok { + if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED { + time.Sleep(time.Second) + continue + } + } + } + return nil + } + + if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil { + if err != errorStopRequested { + glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + } + return nil + } + } +} + +// syncWith replaces the store's items with the given list. +func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error { + found := make([]interface{}, 0, len(items)) + for _, item := range items { + found = append(found, item) + } + return r.store.Replace(found, resourceVersion) +} + +// watchHandler watches w and keeps *resourceVersion up to date. 
+func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error { + start := r.clock.Now() + eventCount := 0 + + // Stopping the watcher should be idempotent and if we return from this function there's no way + // we're coming back in with the same watch interface. + defer w.Stop() + // update metrics + defer func() { + r.metrics.numberOfItemsInWatch.Observe(float64(eventCount)) + r.metrics.watchDuration.Observe(time.Since(start).Seconds()) + }() + +loop: + for { + select { + case <-stopCh: + return errorStopRequested + case err := <-errc: + return err + case event, ok := <-w.ResultChan(): + if !ok { + break loop + } + if event.Type == watch.Error { + return apierrs.FromObject(event.Object) + } + if e, a := r.expectedType, reflect.TypeOf(event.Object); e != nil && e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a)) + continue + } + meta, err := meta.Accessor(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) + continue + } + newResourceVersion := meta.GetResourceVersion() + switch event.Type { + case watch.Added: + err := r.store.Add(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err)) + } + case watch.Modified: + err := r.store.Update(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", r.name, event.Object, err)) + } + case watch.Deleted: + // TODO: Will any consumers need access to the "last known + // state", which is passed in event.Object? If so, may need + // to change this. 
+ err := r.store.Delete(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err)) + } + default: + utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) + } + *resourceVersion = newResourceVersion + r.setLastSyncResourceVersion(newResourceVersion) + eventCount++ + } + } + + watchDuration := r.clock.Now().Sub(start) + if watchDuration < 1*time.Second && eventCount == 0 { + r.metrics.numberOfShortWatches.Inc() + return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) + } + glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) + return nil +} + +// LastSyncResourceVersion is the resource version observed when last sync with the underlying store +// The value returned is not synchronized with access to the underlying store and is not thread-safe +func (r *Reflector) LastSyncResourceVersion() string { + r.lastSyncResourceVersionMutex.RLock() + defer r.lastSyncResourceVersionMutex.RUnlock() + return r.lastSyncResourceVersion +} + +func (r *Reflector) setLastSyncResourceVersion(v string) { + r.lastSyncResourceVersionMutex.Lock() + defer r.lastSyncResourceVersionMutex.Unlock() + r.lastSyncResourceVersion = v + + rv, err := strconv.Atoi(v) + if err == nil { + r.metrics.lastResourceVersion.Set(float64(rv)) + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go new file mode 100644 index 000000000..0945e5c3a --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file provides abstractions for setting the provider (e.g., prometheus) +// of metrics. + +package cache + +import ( + "sync" +) + +// GaugeMetric represents a single numerical value that can arbitrarily go up +// and down. +type GaugeMetric interface { + Set(float64) +} + +// CounterMetric represents a single numerical value that only ever +// goes up. +type CounterMetric interface { + Inc() +} + +// SummaryMetric captures individual observations. +type SummaryMetric interface { + Observe(float64) +} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Dec() {} +func (noopMetric) Observe(float64) {} +func (noopMetric) Set(float64) {} + +type reflectorMetrics struct { + numberOfLists CounterMetric + listDuration SummaryMetric + numberOfItemsInList SummaryMetric + + numberOfWatches CounterMetric + numberOfShortWatches CounterMetric + watchDuration SummaryMetric + numberOfItemsInWatch SummaryMetric + + lastResourceVersion GaugeMetric +} + +// MetricsProvider generates various metrics used by the reflector. 
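// A compact sketch (not part of the vendored file) of supplying a custom
// provider via SetReflectorMetricsProvider (defined further below): every
// metric is backed by an expvar.Float. The expvarProvider/expvarMetric names
// are invented for this illustration; production code would typically wire
// these to Prometheus instead.
package metricssketch

import (
	"expvar"

	"k8s.io/client-go/tools/cache"
)

type expvarMetric struct{ v *expvar.Float }

func (m expvarMetric) Inc()              { m.v.Add(1) }
func (m expvarMetric) Set(f float64)     { m.v.Set(f) }
func (m expvarMetric) Observe(f float64) { m.v.Set(f) } // keeps only the latest observation

// newMetric registers one expvar per metric name; names must be unique per process.
func newMetric(name string) expvarMetric { return expvarMetric{v: expvar.NewFloat(name)} }

type expvarProvider struct{}

func (expvarProvider) NewListsMetric(name string) cache.CounterMetric         { return newMetric(name + "_lists") }
func (expvarProvider) NewListDurationMetric(name string) cache.SummaryMetric  { return newMetric(name + "_list_duration") }
func (expvarProvider) NewItemsInListMetric(name string) cache.SummaryMetric   { return newMetric(name + "_items_in_list") }
func (expvarProvider) NewWatchesMetric(name string) cache.CounterMetric       { return newMetric(name + "_watches") }
func (expvarProvider) NewShortWatchesMetric(name string) cache.CounterMetric  { return newMetric(name + "_short_watches") }
func (expvarProvider) NewWatchDurationMetric(name string) cache.SummaryMetric { return newMetric(name + "_watch_duration") }
func (expvarProvider) NewItemsInWatchMetric(name string) cache.SummaryMetric  { return newMetric(name + "_items_in_watch") }
func (expvarProvider) NewLastResourceVersionMetric(name string) cache.GaugeMetric {
	return newMetric(name + "_last_rv")
}

func init() {
	cache.SetReflectorMetricsProvider(expvarProvider{})
}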
+type MetricsProvider interface { + NewListsMetric(name string) CounterMetric + NewListDurationMetric(name string) SummaryMetric + NewItemsInListMetric(name string) SummaryMetric + + NewWatchesMetric(name string) CounterMetric + NewShortWatchesMetric(name string) CounterMetric + NewWatchDurationMetric(name string) SummaryMetric + NewItemsInWatchMetric(name string) SummaryMetric + + NewLastResourceVersionMetric(name string) GaugeMetric +} + +type noopMetricsProvider struct{} + +func (noopMetricsProvider) NewListsMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewListDurationMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewItemsInListMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewWatchesMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewShortWatchesMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewWatchDurationMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewItemsInWatchMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewLastResourceVersionMetric(name string) GaugeMetric { + return noopMetric{} +} + +var metricsFactory = struct { + metricsProvider MetricsProvider + setProviders sync.Once +}{ + metricsProvider: noopMetricsProvider{}, +} + +func newReflectorMetrics(name string) *reflectorMetrics { + var ret *reflectorMetrics + if len(name) == 0 { + return ret + } + return &reflectorMetrics{ + numberOfLists: metricsFactory.metricsProvider.NewListsMetric(name), + listDuration: metricsFactory.metricsProvider.NewListDurationMetric(name), + numberOfItemsInList: metricsFactory.metricsProvider.NewItemsInListMetric(name), + numberOfWatches: metricsFactory.metricsProvider.NewWatchesMetric(name), + numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name), + watchDuration: metricsFactory.metricsProvider.NewWatchDurationMetric(name), + numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name), + lastResourceVersion: metricsFactory.metricsProvider.NewLastResourceVersionMetric(name), + } +} + +// SetReflectorMetricsProvider sets the metrics provider +func SetReflectorMetricsProvider(metricsProvider MetricsProvider) { + metricsFactory.setProviders.Do(func() { + metricsFactory.metricsProvider = metricsProvider + }) +} diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go new file mode 100644 index 000000000..5f8c507f9 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -0,0 +1,597 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/buffer" + "k8s.io/client-go/util/retry" + + "github.com/golang/glog" +) + +// SharedInformer has a shared data cache and is capable of distributing notifications for changes +// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is +// one behavior change compared to a standard Informer. When you receive a notification, the cache +// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend +// on the contents of the cache exactly matching the notification you've received in handler +// functions. If there was a create, followed by a delete, the cache may NOT have your item. This +// has advantages over the broadcaster since it allows us to share a common cache across many +// controllers. Extending the broadcaster would have required us keep duplicate caches for each +// watch. +type SharedInformer interface { + // AddEventHandler adds an event handler to the shared informer using the shared informer's resync + // period. Events to a single handler are delivered sequentially, but there is no coordination + // between different handlers. + AddEventHandler(handler ResourceEventHandler) + // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the + // specified resync period. Events to a single handler are delivered sequentially, but there is + // no coordination between different handlers. + AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) + // GetStore returns the Store. + GetStore() Store + // GetController gives back a synthetic interface that "votes" to start the informer + GetController() Controller + // Run starts the shared informer, which will be stopped when stopCh is closed. + Run(stopCh <-chan struct{}) + // HasSynced returns true if the shared informer's store has synced. + HasSynced() bool + // LastSyncResourceVersion is the resource version observed when last synced with the underlying + // store. The value returned is not synchronized with access to the underlying store and is not + // thread-safe. + LastSyncResourceVersion() string +} + +type SharedIndexInformer interface { + SharedInformer + // AddIndexers add indexers to the informer before it starts. + AddIndexers(indexers Indexers) error + GetIndexer() Indexer +} + +// NewSharedInformer creates a new instance for the listwatcher. +func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer { + return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{}) +} + +// NewSharedIndexInformer creates a new instance for the listwatcher. 
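// A short sketch (not part of the vendored file) of consuming the shared
// informer API described above: one informer, one event handler, started with
// Run and gated on WaitForCacheSync. The lw argument is assumed to come from
// NewListWatchFromClient or similar; it is not constructed here.
package informersketch

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

func watchPods(lw cache.ListerWatcher, stopCh <-chan struct{}) {
	informer := cache.NewSharedIndexInformer(lw, &v1.Pod{}, 30*time.Second, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { fmt.Println("added:", obj.(*v1.Pod).Name) },
		DeleteFunc: func(obj interface{}) { fmt.Println("deleted one pod") },
	})
	go informer.Run(stopCh)
	if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
		return // stop was requested before the initial list finished
	}
	fmt.Println("cache synced;", len(informer.GetStore().ListKeys()), "pods known")
}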
+func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { + realClock := &clock.RealClock{} + sharedIndexInformer := &sharedIndexInformer{ + processor: &sharedProcessor{clock: realClock}, + indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), + listerWatcher: lw, + objectType: objType, + resyncCheckPeriod: defaultEventHandlerResyncPeriod, + defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, + cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)), + clock: realClock, + } + return sharedIndexInformer +} + +// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. +type InformerSynced func() bool + +const ( + // syncedPollPeriod controls how often you look at the status of your sync funcs + syncedPollPeriod = 100 * time.Millisecond + + // initialBufferSize is the initial number of event notifications that can be buffered. + initialBufferSize = 1024 +) + +// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false +// if the controller should shutdown +func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { + err := wait.PollUntil(syncedPollPeriod, + func() (bool, error) { + for _, syncFunc := range cacheSyncs { + if !syncFunc() { + return false, nil + } + } + return true, nil + }, + stopCh) + if err != nil { + glog.V(2).Infof("stop requested") + return false + } + + glog.V(4).Infof("caches populated") + return true +} + +type sharedIndexInformer struct { + indexer Indexer + controller Controller + + processor *sharedProcessor + cacheMutationDetector CacheMutationDetector + + // This block is tracked to handle late initialization of the controller + listerWatcher ListerWatcher + objectType runtime.Object + + // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call + // shouldResync to check if any of our listeners need a resync. + resyncCheckPeriod time.Duration + // defaultEventHandlerResyncPeriod is the default resync period for any handlers added via + // AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default + // value). + defaultEventHandlerResyncPeriod time.Duration + // clock allows for testability + clock clock.Clock + + started, stopped bool + startedLock sync.Mutex + + // blockDeltas gives a way to stop all event distribution so that a late event handler + // can safely join the shared informer. + blockDeltas sync.Mutex +} + +// dummyController hides the fact that a SharedInformer is different from a dedicated one +// where a caller can `Run`. The run method is disconnected in this case, because higher +// level logic will decide when to start the SharedInformer and related controller. +// Because returning information back is always asynchronous, the legacy callers shouldn't +// notice any change in behavior. 
+type dummyController struct { + informer *sharedIndexInformer +} + +func (v *dummyController) Run(stopCh <-chan struct{}) { +} + +func (v *dummyController) HasSynced() bool { + return v.informer.HasSynced() +} + +func (c *dummyController) LastSyncResourceVersion() string { + return "" +} + +type updateNotification struct { + oldObj interface{} + newObj interface{} +} + +type addNotification struct { + newObj interface{} +} + +type deleteNotification struct { + oldObj interface{} +} + +func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, s.indexer) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + FullResyncPeriod: s.resyncCheckPeriod, + RetryOnError: false, + ShouldResync: s.processor.shouldResync, + + Process: s.HandleDeltas, + } + + func() { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + s.controller = New(cfg) + s.controller.(*controller).clock = s.clock + s.started = true + }() + + // Separate stop channel because Processor should be stopped strictly after controller + processorStopCh := make(chan struct{}) + var wg wait.Group + defer wg.Wait() // Wait for Processor to stop + defer close(processorStopCh) // Tell Processor to stop + wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run) + wg.StartWithChannel(processorStopCh, s.processor.run) + + defer func() { + s.startedLock.Lock() + defer s.startedLock.Unlock() + s.stopped = true // Don't want any new listeners + }() + s.controller.Run(stopCh) +} + +func (s *sharedIndexInformer) HasSynced() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return false + } + return s.controller.HasSynced() +} + +func (s *sharedIndexInformer) LastSyncResourceVersion() string { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return "" + } + return s.controller.LastSyncResourceVersion() +} + +func (s *sharedIndexInformer) GetStore() Store { + return s.indexer +} + +func (s *sharedIndexInformer) GetIndexer() Indexer { + return s.indexer +} + +func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + return s.indexer.AddIndexers(indexers) +} + +func (s *sharedIndexInformer) GetController() Controller { + return &dummyController{informer: s} +} + +func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) { + s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod) +} + +func determineResyncPeriod(desired, check time.Duration) time.Duration { + if desired == 0 { + return desired + } + if check == 0 { + glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) + return 0 + } + if desired < check { + glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) + return check + } + return desired +} + +const minimumResyncPeriod = 1 * time.Second + +func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.stopped { + glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) + return + } + + if resyncPeriod > 0 { + if resyncPeriod < minimumResyncPeriod { + 
glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) + resyncPeriod = minimumResyncPeriod + } + + if resyncPeriod < s.resyncCheckPeriod { + if s.started { + glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) + resyncPeriod = s.resyncCheckPeriod + } else { + // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update + // resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners + // accordingly + s.resyncCheckPeriod = resyncPeriod + s.processor.resyncCheckPeriodChanged(resyncPeriod) + } + } + } + + listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize) + + if !s.started { + s.processor.addListener(listener) + return + } + + // in order to safely join, we have to + // 1. stop sending add/update/delete notifications + // 2. do a list against the store + // 3. send synthetic "Add" events to the new handler + // 4. unblock + s.blockDeltas.Lock() + defer s.blockDeltas.Unlock() + + s.processor.addListener(listener) + for _, item := range s.indexer.List() { + listener.add(addNotification{newObj: item}) + } +} + +func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { + s.blockDeltas.Lock() + defer s.blockDeltas.Unlock() + + // from oldest to newest + for _, d := range obj.(Deltas) { + switch d.Type { + case Sync, Added, Updated: + isSync := d.Type == Sync + s.cacheMutationDetector.AddObject(d.Object) + if old, exists, err := s.indexer.Get(d.Object); err == nil && exists { + if err := s.indexer.Update(d.Object); err != nil { + return err + } + s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync) + } else { + if err := s.indexer.Add(d.Object); err != nil { + return err + } + s.processor.distribute(addNotification{newObj: d.Object}, isSync) + } + case Deleted: + if err := s.indexer.Delete(d.Object); err != nil { + return err + } + s.processor.distribute(deleteNotification{oldObj: d.Object}, false) + } + } + return nil +} + +type sharedProcessor struct { + listenersStarted bool + listenersLock sync.RWMutex + listeners []*processorListener + syncingListeners []*processorListener + clock clock.Clock + wg wait.Group +} + +func (p *sharedProcessor) addListener(listener *processorListener) { + p.listenersLock.Lock() + defer p.listenersLock.Unlock() + + p.addListenerLocked(listener) + if p.listenersStarted { + p.wg.Start(listener.run) + p.wg.Start(listener.pop) + } +} + +func (p *sharedProcessor) addListenerLocked(listener *processorListener) { + p.listeners = append(p.listeners, listener) + p.syncingListeners = append(p.syncingListeners, listener) +} + +func (p *sharedProcessor) distribute(obj interface{}, sync bool) { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + if sync { + for _, listener := range p.syncingListeners { + listener.add(obj) + } + } else { + for _, listener := range p.listeners { + listener.add(obj) + } + } +} + +func (p *sharedProcessor) run(stopCh <-chan struct{}) { + func() { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + for _, listener := range p.listeners { + p.wg.Start(listener.run) + p.wg.Start(listener.pop) + } + p.listenersStarted = true + }() + <-stopCh + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + for _, listener := range 
p.listeners { + close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop + } + p.wg.Wait() // Wait for all .pop() and .run() to stop +} + +// shouldResync queries every listener to determine if any of them need a resync, based on each +// listener's resyncPeriod. +func (p *sharedProcessor) shouldResync() bool { + p.listenersLock.Lock() + defer p.listenersLock.Unlock() + + p.syncingListeners = []*processorListener{} + + resyncNeeded := false + now := p.clock.Now() + for _, listener := range p.listeners { + // need to loop through all the listeners to see if they need to resync so we can prepare any + // listeners that are going to be resyncing. + if listener.shouldResync(now) { + resyncNeeded = true + p.syncingListeners = append(p.syncingListeners, listener) + listener.determineNextResync(now) + } + } + return resyncNeeded +} + +func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + for _, listener := range p.listeners { + resyncPeriod := determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod) + listener.setResyncPeriod(resyncPeriod) + } +} + +type processorListener struct { + nextCh chan interface{} + addCh chan interface{} + + handler ResourceEventHandler + + // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed. + // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications + // added until we OOM. + // TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but + // we should try to do something better. + pendingNotifications buffer.RingGrowing + + // requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer + requestedResyncPeriod time.Duration + // resyncPeriod is how frequently the listener wants a full resync from the shared informer. This + // value may differ from requestedResyncPeriod if the shared informer adjusts it to align with the + // informer's overall resync check period. 
+ resyncPeriod time.Duration + // nextResync is the earliest time the listener should get a full resync + nextResync time.Time + // resyncLock guards access to resyncPeriod and nextResync + resyncLock sync.Mutex +} + +func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener { + ret := &processorListener{ + nextCh: make(chan interface{}), + addCh: make(chan interface{}), + handler: handler, + pendingNotifications: *buffer.NewRingGrowing(bufferSize), + requestedResyncPeriod: requestedResyncPeriod, + resyncPeriod: resyncPeriod, + } + + ret.determineNextResync(now) + + return ret +} + +func (p *processorListener) add(notification interface{}) { + p.addCh <- notification +} + +func (p *processorListener) pop() { + defer utilruntime.HandleCrash() + defer close(p.nextCh) // Tell .run() to stop + + var nextCh chan<- interface{} + var notification interface{} + for { + select { + case nextCh <- notification: + // Notification dispatched + var ok bool + notification, ok = p.pendingNotifications.ReadOne() + if !ok { // Nothing to pop + nextCh = nil // Disable this select case + } + case notificationToAdd, ok := <-p.addCh: + if !ok { + return + } + if notification == nil { // No notification to pop (and pendingNotifications is empty) + // Optimize the case - skip adding to pendingNotifications + notification = notificationToAdd + nextCh = p.nextCh + } else { // There is already a notification waiting to be dispatched + p.pendingNotifications.WriteOne(notificationToAdd) + } + } + } +} + +func (p *processorListener) run() { + // this call blocks until the channel is closed. When a panic happens during the notification + // we will catch it, **the offending item will be skipped!**, and after a short delay (one second) + // the next notification will be attempted. This is usually better than the alternative of never + // delivering again. + stopCh := make(chan struct{}) + wait.Until(func() { + // this gives us a few quick retries before a long pause and then a few more quick retries + err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) { + for next := range p.nextCh { + switch notification := next.(type) { + case updateNotification: + p.handler.OnUpdate(notification.oldObj, notification.newObj) + case addNotification: + p.handler.OnAdd(notification.newObj) + case deleteNotification: + p.handler.OnDelete(notification.oldObj) + default: + utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next)) + } + } + // the only way to get here is if the p.nextCh is empty and closed + return true, nil + }) + + // the only way to get here is if the p.nextCh is empty and closed + if err == nil { + close(stopCh) + } + }, 1*time.Minute, stopCh) +} + +// shouldResync deterimines if the listener needs a resync. If the listener's resyncPeriod is 0, +// this always returns false. 
+func (p *processorListener) shouldResync(now time.Time) bool { + p.resyncLock.Lock() + defer p.resyncLock.Unlock() + + if p.resyncPeriod == 0 { + return false + } + + return now.After(p.nextResync) || now.Equal(p.nextResync) +} + +func (p *processorListener) determineNextResync(now time.Time) { + p.resyncLock.Lock() + defer p.resyncLock.Unlock() + + p.nextResync = now.Add(p.resyncPeriod) +} + +func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) { + p.resyncLock.Lock() + defer p.resyncLock.Unlock() + + p.resyncPeriod = resyncPeriod +} diff --git a/vendor/k8s.io/client-go/tools/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go new file mode 100755 index 000000000..4958987f0 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/store.go @@ -0,0 +1,244 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" +) + +// Store is a generic object storage interface. Reflector knows how to watch a server +// and update a store. A generic store is provided, which allows Reflector to be used +// as a local caching system, and an LRU store, which allows Reflector to work like a +// queue of items yet to be processed. +// +// Store makes no assumptions about stored object identity; it is the responsibility +// of a Store implementation to provide a mechanism to correctly key objects and to +// define the contract for obtaining objects by some arbitrary key type. +type Store interface { + Add(obj interface{}) error + Update(obj interface{}) error + Delete(obj interface{}) error + List() []interface{} + ListKeys() []string + Get(obj interface{}) (item interface{}, exists bool, err error) + GetByKey(key string) (item interface{}, exists bool, err error) + + // Replace will delete the contents of the store, using instead the + // given list. Store takes ownership of the list, you should not reference + // it after calling this function. + Replace([]interface{}, string) error + Resync() error +} + +// KeyFunc knows how to make a key from an object. Implementations should be deterministic. +type KeyFunc func(obj interface{}) (string, error) + +// KeyError will be returned any time a KeyFunc gives an error; it includes the object +// at fault. +type KeyError struct { + Obj interface{} + Err error +} + +// Error gives a human-readable description of the error. +func (k KeyError) Error() string { + return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err) +} + +// ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for +// the object but not the object itself. +type ExplicitKey string + +// MetaNamespaceKeyFunc is a convenient default KeyFunc which knows how to make +// keys for API objects which implement meta.Interface. +// The key uses the format / unless is empty, then +// it's just . +// +// TODO: replace key-as-string with a key-as-struct so that this +// packing/unpacking won't be necessary. 
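A small illustration of the key format produced and consumed by the helpers defined just below (the object names are made up for the example):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "my-pod"}}

	key, err := cache.MetaNamespaceKeyFunc(pod) // "default/my-pod"
	if err != nil {
		panic(err)
	}

	ns, name, _ := cache.SplitMetaNamespaceKey(key)
	fmt.Println(ns, name) // default my-pod

	// A pre-computed key can be passed straight through via ExplicitKey.
	key, _ = cache.MetaNamespaceKeyFunc(cache.ExplicitKey("kube-system/dns"))
	fmt.Println(key) // kube-system/dns
}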
+func MetaNamespaceKeyFunc(obj interface{}) (string, error) { + if key, ok := obj.(ExplicitKey); ok { + return string(key), nil + } + meta, err := meta.Accessor(obj) + if err != nil { + return "", fmt.Errorf("object has no meta: %v", err) + } + if len(meta.GetNamespace()) > 0 { + return meta.GetNamespace() + "/" + meta.GetName(), nil + } + return meta.GetName(), nil +} + +// SplitMetaNamespaceKey returns the namespace and name that +// MetaNamespaceKeyFunc encoded into key. +// +// TODO: replace key-as-string with a key-as-struct so that this +// packing/unpacking won't be necessary. +func SplitMetaNamespaceKey(key string) (namespace, name string, err error) { + parts := strings.Split(key, "/") + switch len(parts) { + case 1: + // name only, no namespace + return "", parts[0], nil + case 2: + // namespace and name + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected key format: %q", key) +} + +// cache responsibilities are limited to: +// 1. Computing keys for objects via keyFunc +// 2. Invoking methods of a ThreadSafeStorage interface +type cache struct { + // cacheStorage bears the burden of thread safety for the cache + cacheStorage ThreadSafeStore + // keyFunc is used to make the key for objects stored in and retrieved from items, and + // should be deterministic. + keyFunc KeyFunc +} + +var _ Store = &cache{} + +// Add inserts an item into the cache. +func (c *cache) Add(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Add(key, obj) + return nil +} + +// Update sets an item in the cache to its updated state. +func (c *cache) Update(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Update(key, obj) + return nil +} + +// Delete removes an item from the cache. +func (c *cache) Delete(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Delete(key) + return nil +} + +// List returns a list of all the items. +// List is completely threadsafe as long as you treat all items as immutable. +func (c *cache) List() []interface{} { + return c.cacheStorage.List() +} + +// ListKeys returns a list of all the keys of the objects currently +// in the cache. +func (c *cache) ListKeys() []string { + return c.cacheStorage.ListKeys() +} + +// GetIndexers returns the indexers of cache +func (c *cache) GetIndexers() Indexers { + return c.cacheStorage.GetIndexers() +} + +// Index returns a list of items that match on the index function +// Index is thread-safe so long as you treat all items as immutable +func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) { + return c.cacheStorage.Index(indexName, obj) +} + +func (c *cache) IndexKeys(indexName, indexKey string) ([]string, error) { + return c.cacheStorage.IndexKeys(indexName, indexKey) +} + +// ListIndexFuncValues returns the list of generated values of an Index func +func (c *cache) ListIndexFuncValues(indexName string) []string { + return c.cacheStorage.ListIndexFuncValues(indexName) +} + +func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) { + return c.cacheStorage.ByIndex(indexName, indexKey) +} + +func (c *cache) AddIndexers(newIndexers Indexers) error { + return c.cacheStorage.AddIndexers(newIndexers) +} + +// Get returns the requested item, or sets exists=false. +// Get is completely threadsafe as long as you treat all items as immutable. 
+func (c *cache) Get(obj interface{}) (item interface{}, exists bool, err error) { + key, err := c.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return c.GetByKey(key) +} + +// GetByKey returns the request item, or exists=false. +// GetByKey is completely threadsafe as long as you treat all items as immutable. +func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) { + item, exists = c.cacheStorage.Get(key) + return item, exists, nil +} + +// Replace will delete the contents of 'c', using instead the given list. +// 'c' takes ownership of the list, you should not reference the list again +// after calling this function. +func (c *cache) Replace(list []interface{}, resourceVersion string) error { + items := map[string]interface{}{} + for _, item := range list { + key, err := c.keyFunc(item) + if err != nil { + return KeyError{item, err} + } + items[key] = item + } + c.cacheStorage.Replace(items, resourceVersion) + return nil +} + +// Resync touches all items in the store to force processing +func (c *cache) Resync() error { + return c.cacheStorage.Resync() +} + +// NewStore returns a Store implemented simply with a map and a lock. +func NewStore(keyFunc KeyFunc) Store { + return &cache{ + cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), + keyFunc: keyFunc, + } +} + +// NewIndexer returns an Indexer implemented simply with a map and a lock. +func NewIndexer(keyFunc KeyFunc, indexers Indexers) Indexer { + return &cache{ + cacheStorage: NewThreadSafeStore(indexers, Indices{}), + keyFunc: keyFunc, + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go new file mode 100644 index 000000000..1c201efb6 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -0,0 +1,304 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// ThreadSafeStore is an interface that allows concurrent access to a storage backend. +// TL;DR caveats: you must not modify anything returned by Get or List as it will break +// the indexing feature in addition to not being thread safe. +// +// The guarantees of thread safety provided by List/Get are only valid if the caller +// treats returned items as read-only. For example, a pointer inserted in the store +// through `Add` will be returned as is by `Get`. Multiple clients might invoke `Get` +// on the same key and modify the pointer in a non-thread-safe way. Also note that +// modifying objects stored by the indexers (if any) will *not* automatically lead +// to a re-index. So it's not a good idea to directly modify the objects returned by +// Get/List, in general. 
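A hedged usage sketch for the NewStore/NewIndexer constructors above, which sit on top of the ThreadSafeStore described here; `byNamespace` is a hypothetical index function, and everything returned by Get/List/ByIndex is treated as read-only per the caveat just above:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

// byNamespace is a hypothetical IndexFunc grouping objects by namespace.
func byNamespace(obj interface{}) ([]string, error) {
	pod, ok := obj.(*v1.Pod)
	if !ok {
		return nil, fmt.Errorf("expected *v1.Pod, got %T", obj)
	}
	return []string{pod.Namespace}, nil
}

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": byNamespace})

	_ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "a"}})
	_ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "b"}})

	// Items returned here must not be mutated.
	inDefault, _ := indexer.ByIndex("namespace", "default")
	fmt.Println(len(inDefault)) // 1

	obj, exists, _ := indexer.GetByKey("kube-system/b")
	fmt.Println(exists, obj != nil) // true true
}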
+type ThreadSafeStore interface { + Add(key string, obj interface{}) + Update(key string, obj interface{}) + Delete(key string) + Get(key string) (item interface{}, exists bool) + List() []interface{} + ListKeys() []string + Replace(map[string]interface{}, string) + Index(indexName string, obj interface{}) ([]interface{}, error) + IndexKeys(indexName, indexKey string) ([]string, error) + ListIndexFuncValues(name string) []string + ByIndex(indexName, indexKey string) ([]interface{}, error) + GetIndexers() Indexers + + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. + AddIndexers(newIndexers Indexers) error + Resync() error +} + +// threadSafeMap implements ThreadSafeStore +type threadSafeMap struct { + lock sync.RWMutex + items map[string]interface{} + + // indexers maps a name to an IndexFunc + indexers Indexers + // indices maps a name to an Index + indices Indices +} + +func (c *threadSafeMap) Add(key string, obj interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + oldObject := c.items[key] + c.items[key] = obj + c.updateIndices(oldObject, obj, key) +} + +func (c *threadSafeMap) Update(key string, obj interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + oldObject := c.items[key] + c.items[key] = obj + c.updateIndices(oldObject, obj, key) +} + +func (c *threadSafeMap) Delete(key string) { + c.lock.Lock() + defer c.lock.Unlock() + if obj, exists := c.items[key]; exists { + c.deleteFromIndices(obj, key) + delete(c.items, key) + } +} + +func (c *threadSafeMap) Get(key string) (item interface{}, exists bool) { + c.lock.RLock() + defer c.lock.RUnlock() + item, exists = c.items[key] + return item, exists +} + +func (c *threadSafeMap) List() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + list := make([]interface{}, 0, len(c.items)) + for _, item := range c.items { + list = append(list, item) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently +// in the threadSafeMap. +func (c *threadSafeMap) ListKeys() []string { + c.lock.RLock() + defer c.lock.RUnlock() + list := make([]string, 0, len(c.items)) + for key := range c.items { + list = append(list, key) + } + return list +} + +func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion string) { + c.lock.Lock() + defer c.lock.Unlock() + c.items = items + + // rebuild any index + c.indices = Indices{} + for key, item := range c.items { + c.updateIndices(nil, item, key) + } +} + +// Index returns a list of items that match on the index function +// Index is thread-safe so long as you treat all items as immutable +func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + indexFunc := c.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + indexKeys, err := indexFunc(obj) + if err != nil { + return nil, err + } + index := c.indices[indexName] + + // need to de-dupe the return list. Since multiple keys are allowed, this can happen. 
+ returnKeySet := sets.String{} + for _, indexKey := range indexKeys { + set := index[indexKey] + for _, key := range set.UnsortedList() { + returnKeySet.Insert(key) + } + } + + list := make([]interface{}, 0, returnKeySet.Len()) + for absoluteKey := range returnKeySet { + list = append(list, c.items[absoluteKey]) + } + return list, nil +} + +// ByIndex returns a list of items that match an exact value on the index function +func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + indexFunc := c.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + index := c.indices[indexName] + + set := index[indexKey] + list := make([]interface{}, 0, set.Len()) + for _, key := range set.List() { + list = append(list, c.items[key]) + } + + return list, nil +} + +// IndexKeys returns a list of keys that match on the index function. +// IndexKeys is thread-safe so long as you treat all items as immutable. +func (c *threadSafeMap) IndexKeys(indexName, indexKey string) ([]string, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + indexFunc := c.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + index := c.indices[indexName] + + set := index[indexKey] + return set.List(), nil +} + +func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string { + c.lock.RLock() + defer c.lock.RUnlock() + + index := c.indices[indexName] + names := make([]string, 0, len(index)) + for key := range index { + names = append(names, key) + } + return names +} + +func (c *threadSafeMap) GetIndexers() Indexers { + return c.indexers +} + +func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { + c.lock.Lock() + defer c.lock.Unlock() + + if len(c.items) > 0 { + return fmt.Errorf("cannot add indexers to running index") + } + + oldKeys := sets.StringKeySet(c.indexers) + newKeys := sets.StringKeySet(newIndexers) + + if oldKeys.HasAny(newKeys.List()...) 
{ + return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys)) + } + + for k, v := range newIndexers { + c.indexers[k] = v + } + return nil +} + +// updateIndices modifies the objects location in the managed indexes, if this is an update, you must provide an oldObj +// updateIndices must be called from a function that already has a lock on the cache +func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) { + // if we got an old object, we need to remove it before we add it again + if oldObj != nil { + c.deleteFromIndices(oldObj, key) + } + for name, indexFunc := range c.indexers { + indexValues, err := indexFunc(newObj) + if err != nil { + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) + } + index := c.indices[name] + if index == nil { + index = Index{} + c.indices[name] = index + } + + for _, indexValue := range indexValues { + set := index[indexValue] + if set == nil { + set = sets.String{} + index[indexValue] = set + } + set.Insert(key) + } + } +} + +// deleteFromIndices removes the object from each of the managed indexes +// it is intended to be called from a function that already has a lock on the cache +func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) { + for name, indexFunc := range c.indexers { + indexValues, err := indexFunc(obj) + if err != nil { + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) + } + + index := c.indices[name] + if index == nil { + continue + } + for _, indexValue := range indexValues { + set := index[indexValue] + if set != nil { + set.Delete(key) + } + } + } +} + +func (c *threadSafeMap) Resync() error { + // Nothing to do + return nil +} + +func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore { + return &threadSafeMap{ + items: map[string]interface{}{}, + indexers: indexers, + indices: indices, + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/undelta_store.go b/vendor/k8s.io/client-go/tools/cache/undelta_store.go new file mode 100644 index 000000000..117df46c4 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/undelta_store.go @@ -0,0 +1,83 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +// UndeltaStore listens to incremental updates and sends complete state on every change. +// It implements the Store interface so that it can receive a stream of mirrored objects +// from Reflector. Whenever it receives any complete (Store.Replace) or incremental change +// (Store.Add, Store.Update, Store.Delete), it sends the complete state by calling PushFunc. +// It is thread-safe. It guarantees that every change (Add, Update, Replace, Delete) results +// in one call to PushFunc, but sometimes PushFunc may be called twice with the same values. +// PushFunc should be thread safe. +type UndeltaStore struct { + Store + PushFunc func([]interface{}) +} + +// Assert that it implements the Store interface. 
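A brief sketch of how an UndeltaStore might be used; the push callback and object names are made up, but the behavior follows the contract above: every mutation re-publishes the complete state.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	push := func(items []interface{}) {
		// Called with the full contents after every Add/Update/Delete/Replace.
		fmt.Println("current object count:", len(items))
	}

	store := cache.NewUndeltaStore(push, cache.MetaNamespaceKeyFunc)

	_ = store.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "a"}})    // push sees 1 item
	_ = store.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "b"}})    // push sees 2 items
	_ = store.Delete(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "a"}}) // push sees 1 item
}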
+var _ Store = &UndeltaStore{} + +// Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods. +// In the functions below, the lock gets released and reacquired betweend the {Add,Delete,etc} +// and the List. So, the following can happen, resulting in two identical calls to PushFunc. +// time thread 1 thread 2 +// 0 UndeltaStore.Add(a) +// 1 UndeltaStore.Add(b) +// 2 Store.Add(a) +// 3 Store.Add(b) +// 4 Store.List() -> [a,b] +// 5 Store.List() -> [a,b] + +func (u *UndeltaStore) Add(obj interface{}) error { + if err := u.Store.Add(obj); err != nil { + return err + } + u.PushFunc(u.Store.List()) + return nil +} + +func (u *UndeltaStore) Update(obj interface{}) error { + if err := u.Store.Update(obj); err != nil { + return err + } + u.PushFunc(u.Store.List()) + return nil +} + +func (u *UndeltaStore) Delete(obj interface{}) error { + if err := u.Store.Delete(obj); err != nil { + return err + } + u.PushFunc(u.Store.List()) + return nil +} + +func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error { + if err := u.Store.Replace(list, resourceVersion); err != nil { + return err + } + u.PushFunc(u.Store.List()) + return nil +} + +// NewUndeltaStore returns an UndeltaStore implemented with a Store. +func NewUndeltaStore(pushFunc func([]interface{}), keyFunc KeyFunc) *UndeltaStore { + return &UndeltaStore{ + Store: NewStore(keyFunc), + PushFunc: pushFunc, + } +} diff --git a/vendor/k8s.io/client-go/tools/pager/pager.go b/vendor/k8s.io/client-go/tools/pager/pager.go new file mode 100644 index 000000000..2e0874e0e --- /dev/null +++ b/vendor/k8s.io/client-go/tools/pager/pager.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pager + +import ( + "fmt" + + "golang.org/x/net/context" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const defaultPageSize = 500 + +// ListPageFunc returns a list object for the given list options. +type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) + +// SimplePageFunc adapts a context-less list function into one that accepts a context. +func SimplePageFunc(fn func(opts metav1.ListOptions) (runtime.Object, error)) ListPageFunc { + return func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { + return fn(opts) + } +} + +// ListPager assists client code in breaking large list queries into multiple +// smaller chunks of PageSize or smaller. PageFn is expected to accept a +// metav1.ListOptions that supports paging and return a list. The pager does +// not alter the field or label selectors on the initial options list. +type ListPager struct { + PageSize int64 + PageFn ListPageFunc + + FullListIfExpired bool +} + +// New creates a new pager from the provided pager function using the default +// options. 
It will fall back to a full list if an expiration error is encountered +// as a last resort. +func New(fn ListPageFunc) *ListPager { + return &ListPager{ + PageSize: defaultPageSize, + PageFn: fn, + FullListIfExpired: true, + } +} + +// TODO: introduce other types of paging functions - such as those that retrieve from a list +// of namespaces. + +// List returns a single list object, but attempts to retrieve smaller chunks from the +// server to reduce the impact on the server. If the chunk attempt fails, it will load +// the full list instead. The Limit field on options, if unset, will default to the page size. +func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if options.Limit == 0 { + options.Limit = p.PageSize + } + var list *metainternalversion.List + for { + obj, err := p.PageFn(ctx, options) + if err != nil { + if !errors.IsResourceExpired(err) || !p.FullListIfExpired { + return nil, err + } + // the list expired while we were processing, fall back to a full list + options.Limit = 0 + options.Continue = "" + return p.PageFn(ctx, options) + } + m, err := meta.ListAccessor(obj) + if err != nil { + return nil, fmt.Errorf("returned object must be a list: %v", err) + } + + // exit early and return the object we got if we haven't processed any pages + if len(m.GetContinue()) == 0 && list == nil { + return obj, nil + } + + // initialize the list and fill its contents + if list == nil { + list = &metainternalversion.List{Items: make([]runtime.Object, 0, options.Limit+1)} + list.ResourceVersion = m.GetResourceVersion() + list.SelfLink = m.GetSelfLink() + } + if err := meta.EachListItem(obj, func(obj runtime.Object) error { + list.Items = append(list.Items, obj) + return nil + }); err != nil { + return nil, err + } + + // if we have no more items, return the list + if len(m.GetContinue()) == 0 { + return list, nil + } + + // set the next loop up + options.Continue = m.GetContinue() + } +} diff --git a/vendor/k8s.io/client-go/util/buffer/ring_growing.go b/vendor/k8s.io/client-go/util/buffer/ring_growing.go new file mode 100644 index 000000000..86965a513 --- /dev/null +++ b/vendor/k8s.io/client-go/util/buffer/ring_growing.go @@ -0,0 +1,72 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package buffer + +// RingGrowing is a growing ring buffer. +// Not thread safe. +type RingGrowing struct { + data []interface{} + n int // Size of Data + beg int // First available element + readable int // Number of data items available +} + +// NewRingGrowing constructs a new RingGrowing instance with provided parameters. +func NewRingGrowing(initialSize int) *RingGrowing { + return &RingGrowing{ + data: make([]interface{}, initialSize), + n: initialSize, + } +} + +// ReadOne reads (consumes) first item from the buffer if it is available, otherwise returns false. 
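A quick sketch of the ring buffer's behavior (this is the unbounded buffer backing processorListener.pendingNotifications earlier in this patch); the values written are arbitrary:

package main

import (
	"fmt"

	"k8s.io/client-go/util/buffer"
)

func main() {
	r := buffer.NewRingGrowing(2) // initial capacity of 2; doubles when full

	r.WriteOne("a")
	r.WriteOne("b")
	r.WriteOne("c") // triggers a grow to capacity 4

	for {
		item, ok := r.ReadOne()
		if !ok {
			break // buffer drained
		}
		fmt.Println(item) // a, b, c in FIFO order
	}
}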
+func (r *RingGrowing) ReadOne() (data interface{}, ok bool) { + if r.readable == 0 { + return nil, false + } + r.readable-- + element := r.data[r.beg] + r.data[r.beg] = nil // Remove reference to the object to help GC + if r.beg == r.n-1 { + // Was the last element + r.beg = 0 + } else { + r.beg++ + } + return element, true +} + +// WriteOne adds an item to the end of the buffer, growing it if it is full. +func (r *RingGrowing) WriteOne(data interface{}) { + if r.readable == r.n { + // Time to grow + newN := r.n * 2 + newData := make([]interface{}, newN) + to := r.beg + r.readable + if to <= r.n { + copy(newData, r.data[r.beg:to]) + } else { + copied := copy(newData, r.data[r.beg:]) + copy(newData[copied:], r.data[:(to%r.n)]) + } + r.beg = 0 + r.data = newData + r.n = newN + } + r.data[(r.readable+r.beg)%r.n] = data + r.readable++ +} diff --git a/vendor/k8s.io/client-go/util/retry/util.go b/vendor/k8s.io/client-go/util/retry/util.go new file mode 100644 index 000000000..3ac0840ad --- /dev/null +++ b/vendor/k8s.io/client-go/util/retry/util.go @@ -0,0 +1,79 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package retry + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" +) + +// DefaultRetry is the recommended retry for a conflict where multiple clients +// are making changes to the same resource. +var DefaultRetry = wait.Backoff{ + Steps: 5, + Duration: 10 * time.Millisecond, + Factor: 1.0, + Jitter: 0.1, +} + +// DefaultBackoff is the recommended backoff for a conflict where a client +// may be attempting to make an unrelated modification to a resource under +// active management by one or more controllers. +var DefaultBackoff = wait.Backoff{ + Steps: 4, + Duration: 10 * time.Millisecond, + Factor: 5.0, + Jitter: 0.1, +} + +// RetryConflict executes the provided function repeatedly, retrying if the server returns a conflicting +// write. Callers should preserve previous executions if they wish to retry changes. It performs an +// exponential backoff. +// +// var pod *api.Pod +// err := RetryOnConflict(DefaultBackoff, func() (err error) { +// pod, err = c.Pods("mynamespace").UpdateStatus(podStatus) +// return +// }) +// if err != nil { +// // may be conflict if max retries were hit +// return err +// } +// ... +// +// TODO: Make Backoff an interface? 
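As a rough sketch of driving the retry loop below with hypothetical `getLatest`/`applyChange` helpers (the doc comment's own example uses an older client API), assuming only the exported names in this file:

package example

import (
	"k8s.io/client-go/util/retry"
)

// updateWithRetry re-reads the current object and reapplies the change whenever
// the server reports a write conflict. getLatest and applyChange are hypothetical.
func updateWithRetry(getLatest func() (interface{}, error), applyChange func(interface{}) error) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		obj, err := getLatest()
		if err != nil {
			return err
		}
		// A Conflict error from applyChange causes another backoff-delayed attempt;
		// any other error aborts immediately.
		return applyChange(obj)
	})
}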
+func RetryOnConflict(backoff wait.Backoff, fn func() error) error { + var lastConflictErr error + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + err := fn() + switch { + case err == nil: + return true, nil + case errors.IsConflict(err): + lastConflictErr = err + return false, nil + default: + return false, err + } + }) + if err == wait.ErrWaitTimeout { + err = lastConflictErr + } + return err +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go deleted file mode 100644 index e93542446..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file should be consistent with pkg/api/v1/annotation_key_constants.go. - -package api - -const ( - // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy - // webhook backend fails. - ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" - - // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation - PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" - - // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods - MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" - - // TolerationsAnnotationKey represents the key of tolerations data (json serialized) - // in the Annotations of a Pod. - TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" - - // TaintsAnnotationKey represents the key of taints data (json serialized) - // in the Annotations of a Node. - TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" - - // SeccompPodAnnotationKey represents the key of a seccomp profile applied - // to all containers of a pod. - SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" - - // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied - // to one container of a pod. - SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" - - // CreatedByAnnotation represents the key used to store the spec(json) - // used to create the resource. - // This field is deprecated in favor of ControllerRef (see #44407). - // TODO(#50720): Remove this field in v1.9. - CreatedByAnnotation = "kubernetes.io/created-by" - - // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) - // in the Annotations of a Node. - PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" - - // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure - // container of a pod. The annotation value is a comma separated list of sysctl_name=value - // key-value pairs. 
Only a limited set of whitelisted and isolated sysctls is supported by - // the kubelet. Pods with other sysctls will fail to launch. - SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls" - - // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure - // container of a pod. The annotation value is a comma separated list of sysctl_name=value - // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly - // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use - // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet - // will fail to launch. - UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" - - // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache - // an object (e.g. secret, config map) before fetching it again from apiserver. - // This annotation can be attached to node. - ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" - - // annotation key prefix used to identify non-convertible json paths. - NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" - - kubectlPrefix = "kubectl.kubernetes.io/" - - // LastAppliedConfigAnnotation is the annotation used to store the previous - // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. - LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" - - // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers - // - // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to - // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow - // access only from the CIDRs currently allocated to MIT & the USPS. - // - // Not all cloud providers support this annotation, though AWS & GCE do. - AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/field_constants.go b/vendor/k8s.io/kubernetes/pkg/api/field_constants.go deleted file mode 100644 index 5ead0f13f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/field_constants.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -// Field path constants that are specific to the internal API -// representation. 
-const ( - NodeUnschedulableField = "spec.unschedulable" - ObjectNameField = "metadata.name" - PodHostField = "spec.nodeName" - PodStatusField = "status.phase" - SecretTypeField = "type" - - EventReasonField = "reason" - EventSourceField = "source" - EventTypeField = "type" - EventInvolvedKindField = "involvedObject.kind" - EventInvolvedNamespaceField = "involvedObject.namespace" - EventInvolvedNameField = "involvedObject.name" - EventInvolvedUIDField = "involvedObject.uid" - EventInvolvedAPIVersionField = "involvedObject.apiVersion" - EventInvolvedResourceVersionField = "involvedObject.resourceVersion" - EventInvolvedFieldPathField = "involvedObject.fieldPath" -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/json.go b/vendor/k8s.io/kubernetes/pkg/api/json.go deleted file mode 100644 index 3a6e04c18..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/json.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import "encoding/json" - -// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations -// to prevent anyone from marshaling these internal structs. - -var _ = json.Marshaler(&AvoidPods{}) -var _ = json.Unmarshaler(&AvoidPods{}) - -func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } -func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/kubernetes/pkg/api/objectreference.go b/vendor/k8s.io/kubernetes/pkg/api/objectreference.go deleted file mode 100644 index b36ecab27..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/objectreference.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//TODO: consider making these methods functions, because we don't want helper -//functions in the k8s.io/api repo. 
- -package api - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" -) - -func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} - -func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { - return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} - -func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/kubernetes/pkg/api/register.go b/vendor/k8s.io/kubernetes/pkg/api/register.go deleted file mode 100644 index 39562e5a5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/register.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "os" - - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" -) - -// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details) -var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry) - -// Registry is an instance of an API registry. This is an interim step to start removing the idea of a global -// API registry. -var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) - -// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. -// NOTE: If you are copying this file to start a new api group, STOP! Copy the -// extensions group instead. This Scheme is special and should appear ONLY in -// the api group, unless you really know what you're doing. -// TODO(lavalamp): make the above error impossible. -var Scheme = runtime.NewScheme() - -// Codecs provides access to encoding and decoding for the scheme -var Codecs = serializer.NewCodecFactory(Scheme) - -// GroupName is the group name use in this package -const GroupName = "" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// ParameterCodec handles versioning of objects that are converted to query parameters. 
-var ParameterCodec = runtime.NewParameterCodec(Scheme) - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { - return err - } - scheme.AddKnownTypes(SchemeGroupVersion, - &Pod{}, - &PodList{}, - &PodStatusResult{}, - &PodTemplate{}, - &PodTemplateList{}, - &ReplicationControllerList{}, - &ReplicationController{}, - &ServiceList{}, - &Service{}, - &ServiceProxyOptions{}, - &NodeList{}, - &Node{}, - &NodeConfigSource{}, - &NodeProxyOptions{}, - &Endpoints{}, - &EndpointsList{}, - &Binding{}, - &Event{}, - &EventList{}, - &List{}, - &LimitRange{}, - &LimitRangeList{}, - &ResourceQuota{}, - &ResourceQuotaList{}, - &Namespace{}, - &NamespaceList{}, - &ServiceAccount{}, - &ServiceAccountList{}, - &Secret{}, - &SecretList{}, - &PersistentVolume{}, - &PersistentVolumeList{}, - &PersistentVolumeClaim{}, - &PersistentVolumeClaimList{}, - &PodAttachOptions{}, - &PodLogOptions{}, - &PodExecOptions{}, - &PodPortForwardOptions{}, - &PodProxyOptions{}, - &ComponentStatus{}, - &ComponentStatusList{}, - &SerializedReference{}, - &RangeAllocation{}, - &ConfigMap{}, - &ConfigMapList{}, - ) - - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource.go b/vendor/k8s.io/kubernetes/pkg/api/resource.go deleted file mode 100644 index ce1747d8e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/resource.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/apimachinery/pkg/api/resource" -) - -func (self ResourceName) String() string { - return string(self) -} - -// Returns the CPU limit if specified. -func (self *ResourceList) Cpu() *resource.Quantity { - if val, ok := (*self)[ResourceCPU]; ok { - return &val - } - return &resource.Quantity{Format: resource.DecimalSI} -} - -// Returns the Memory limit if specified. 
-func (self *ResourceList) Memory() *resource.Quantity { - if val, ok := (*self)[ResourceMemory]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} -} - -func (self *ResourceList) Pods() *resource.Quantity { - if val, ok := (*self)[ResourcePods]; ok { - return &val - } - return &resource.Quantity{} -} - -func (self *ResourceList) NvidiaGPU() *resource.Quantity { - if val, ok := (*self)[ResourceNvidiaGPU]; ok { - return &val - } - return &resource.Quantity{} -} - -func (self *ResourceList) StorageEphemeral() *resource.Quantity { - if val, ok := (*self)[ResourceEphemeralStorage]; ok { - return &val - } - return &resource.Quantity{} -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/taint.go b/vendor/k8s.io/kubernetes/pkg/api/taint.go deleted file mode 100644 index 173e4e601..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/taint.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//TODO: consider making these methods functions, because we don't want helper -//functions in the k8s.io/api repo. - -package api - -import "fmt" - -// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, -// if the two taints have same key:effect, regard as they match. -func (t *Taint) MatchTaint(taintToMatch Taint) bool { - return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect -} - -// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. -func (t *Taint) ToString() string { - if len(t.Value) == 0 { - return fmt.Sprintf("%v:%v", t.Key, t.Effect) - } - return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/toleration.go b/vendor/k8s.io/kubernetes/pkg/api/toleration.go deleted file mode 100644 index edb73b74e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/toleration.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//TODO: consider making these methods functions, because we don't want helper -//functions in the k8s.io/api repo. - -package api - -// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by , -// if the two tolerations have same combination, regard as they match. -// TODO: uniqueness check for tolerations in api validations. 
-func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { - return t.Key == tolerationToMatch.Key && - t.Effect == tolerationToMatch.Effect && - t.Operator == tolerationToMatch.Operator && - t.Value == tolerationToMatch.Value -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/types.go b/vendor/k8s.io/kubernetes/pkg/api/types.go deleted file mode 100644 index 92d45de61..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/types.go +++ /dev/null @@ -1,4287 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/apimachinery/pkg/api/resource" - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// Common string formats -// --------------------- -// Many fields in this API have formatting requirements. The commonly used -// formats are defined here. -// -// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" -// in the C language. This is captured by the following regex: -// [A-Za-z_][A-Za-z0-9_]* -// This defines the format, but not the length restriction, which should be -// specified at the definition of any field of this type. -// -// DNS_LABEL: This is a string, no more than 63 characters long, that conforms -// to the definition of a "label" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])? -// -// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms -// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* -// or more simply: -// DNS_LABEL(\.DNS_LABEL)* -// -// IANA_SVC_NAME: This is a string, no more than 15 characters long, that -// conforms to the definition of IANA service name in RFC 6335. -// It must contains at least one letter [a-z] and it must contains only [a-z0-9-]. -// Hypens ('-') cannot be leading or trailing character of the string -// and cannot be adjacent to other hyphens. - -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. -// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. -type ObjectMeta struct { - // Name is unique within a namespace. Name is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - // +optional - Name string - - // GenerateName indicates that the name should be made unique by the server prior to persisting - // it. 
A non-empty value for the field indicates the name will be made unique (and the name - // returned to the client will be different than the name passed). The value of this field will - // be combined with a unique suffix on the server if the Name field has not been provided. - // The provided value must be valid within the rules for Name, and may be truncated by the length - // of the suffix required to make the value unique on the server. - // - // If this field is specified, and Name is not present, the server will NOT return a 409 if the - // generated name exists - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - // +optional - GenerateName string - - // Namespace defines the space within which name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - // +optional - Namespace string - - // SelfLink is a URL representing this object. - // +optional - SelfLink string - - // UID is the unique in time and space value for this object. It is typically generated by - // the server on successful creation of a resource and is not allowed to change on PUT - // operations. - // +optional - UID types.UID - - // An opaque value that represents the version of this resource. May be used for optimistic - // concurrency, change detection, and the watch operation on a resource or set of resources. - // Clients must treat these values as opaque and values may only be valid for a particular - // resource or set of resources. Only servers will generate resource versions. - // +optional - ResourceVersion string - - // A sequence number representing a specific generation of the desired state. - // Populated by the system. Read-only. - // +optional - Generation int64 - - // CreationTimestamp is a timestamp representing the server time when this object was - // created. It is not guaranteed to be set in happens-before order across separate operations. - // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // +optional - CreationTimestamp metav1.Time - - // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This - // field is set by the server when a graceful deletion is requested by the user, and is not - // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. - // If not set, graceful deletion of the object has not been requested. 
- // - // Populated by the system when a graceful deletion is requested. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - DeletionTimestamp *metav1.Time - - // DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion - // was requested. Represents the most recent grace period, and may only be shortened once set. - // +optional - DeletionGracePeriodSeconds *int64 - - // Labels are key value pairs that may be used to scope and select individual resources. - // Label keys are of the form: - // label-key ::= prefixed-name | name - // prefixed-name ::= prefix '/' name - // prefix ::= DNS_SUBDOMAIN - // name ::= DNS_LABEL - // The prefix is optional. If the prefix is not specified, the key is assumed to be private - // to the user. Other system components that wish to use labels must specify a prefix. The - // "kubernetes.io/" prefix is reserved for use by kubernetes components. - // +optional - Labels map[string]string - - // Annotations are unstructured key value data stored with a resource that may be set by - // external tooling. They are not queryable and should be preserved when modifying - // objects. Annotation keys have the same formatting restrictions as Label keys. See the - // comments on Labels for details. - // +optional - Annotations map[string]string - - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. - // +optional - OwnerReferences []metav1.OwnerReference - - // An initializer is a controller which enforces some system invariant at object creation time. - // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - Initializers *metav1.Initializers - - // Must be empty before the object is deleted from the registry. Each entry - // is an identifier for the responsible component that will remove the entry - // from the list. If the deletionTimestamp of the object is non-nil, entries - // in this list can only be removed. - // +optional - Finalizers []string - - // The name of the cluster which the object belongs to. - // This is used to distinguish resources with same name and namespace in different clusters. - // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. - // +optional - ClusterName string -} - -const ( - // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients - NamespaceDefault string = "default" - // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces - NamespaceAll string = "" - // NamespaceNone is the argument for a context when there is no namespace. 
- NamespaceNone string = "" - // NamespaceSystem is the system namespace where we place system components. - NamespaceSystem string = "kube-system" - // NamespacePublic is the namespace where we place public info (ConfigMaps) - NamespacePublic string = "kube-public" - // TerminationMessagePathDefault means the default path to capture the application termination message running in a container - TerminationMessagePathDefault string = "/dev/termination-log" -) - -// Volume represents a named volume in a pod that may be accessed by any containers in the pod. -type Volume struct { - // Required: This must be a DNS_LABEL. Each volume in a pod must have - // a unique name. - Name string - // The VolumeSource represents the location and type of a volume to mount. - // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. - // This implied behavior is deprecated and will be removed in a future version. - // +optional - VolumeSource -} - -// VolumeSource represents the source location of a volume to mount. -// Only one of its members may be specified. -type VolumeSource struct { - // HostPath represents file or directory on the host machine that is - // directly exposed to the container. This is generally used for system - // agents or other privileged things that are allowed to see the host - // machine. Most containers will NOT need this. - // --- - // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - // mount host directories as read/write. - // +optional - HostPath *HostPathVolumeSource - // EmptyDir represents a temporary directory that shares a pod's lifetime. - // +optional - EmptyDir *EmptyDirVolumeSource - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // GitRepo represents a git repository at a particular revision. - // +optional - GitRepo *GitRepoVolumeSource - // Secret represents a secret that should populate this volume. - // +optional - Secret *SecretVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime - // +optional - NFS *NFSVolumeSource - // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - ISCSI *ISCSIVolumeSource - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime - // +optional - Glusterfs *GlusterfsVolumeSource - // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace - // +optional - PersistentVolumeClaim *PersistentVolumeClaimVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - // +optional - RBD *RBDVolumeSource - - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. 
- // +optional - FlexVolume *FlexVolumeSource - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // +optional - Cinder *CinderVolumeSource - - // CephFS represents a Cephfs mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSVolumeSource - - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource - - // DownwardAPI represents metadata about the pod that should populate this volume - // +optional - DownwardAPI *DownwardAPIVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - AzureFile *AzureFileVolumeSource - // ConfigMap represents a configMap that should populate this volume - // +optional - ConfigMap *ConfigMapVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - // +optional - AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource - // Items for all in one resources secrets, configmaps, and downward API - Projected *ProjectedVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOVolumeSource - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // +optional - StorageOS *StorageOSVolumeSource -} - -// Similar to VolumeSource but meant for the administrator who creates PVs. -// Exactly one of its members must be set. -type PersistentVolumeSource struct { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // HostPath represents a directory on the host. - // Provisioned by a developer or tester. - // This is useful for single-node development and testing only! - // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
- // +optional - HostPath *HostPathVolumeSource - // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod - // +optional - Glusterfs *GlusterfsVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime - // +optional - NFS *NFSVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - // +optional - RBD *RBDVolumeSource - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource - // ISCSIVolumeSource represents an ISCSI resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - ISCSI *ISCSIVolumeSource - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. - // +optional - FlexVolume *FlexVolumeSource - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // +optional - Cinder *CinderVolumeSource - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSPersistentVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - AzureFile *AzureFilePersistentVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - // +optional - AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOVolumeSource - // Local represents directly-attached storage with node affinity - // +optional - Local *LocalVolumeSource - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md - // +optional - StorageOS *StorageOSPersistentVolumeSource -} - -type PersistentVolumeClaimVolumeSource struct { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume - ClaimName string - // Optional: Defaults to false (read/write). ReadOnly here - // will force the ReadOnly setting in VolumeMounts - // +optional - ReadOnly bool -} - -const ( - // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. 
- // It's currently still used and will be held for backwards compatibility - BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" - - // MountOptionAnnotation defines mount option annotation used in PVs - MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" - - // AlphaStorageNodeAffinityAnnotation defines node affinity policies for a PersistentVolume. - // Value is a string of the json representation of type NodeAffinity - AlphaStorageNodeAffinityAnnotation = "volume.alpha.kubernetes.io/node-affinity" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type PersistentVolume struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - //Spec defines a persistent volume owned by the cluster - // +optional - Spec PersistentVolumeSpec - - // Status represents the current information about persistent volume. - // +optional - Status PersistentVolumeStatus -} - -type PersistentVolumeSpec struct { - // Resources represents the actual resources of the volume - Capacity ResourceList - // Source represents the location and type of a volume to mount. - PersistentVolumeSource - // AccessModes contains all ways the volume can be mounted - // +optional - AccessModes []PersistentVolumeAccessMode - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. - // ClaimRef is expected to be non-nil when bound. - // claim.VolumeName is the authoritative bind between PV and PVC. - // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is - // ignored, i.e. labels of this PV do not need to match PVC selector. - // +optional - ClaimRef *ObjectReference - // Optional: what happens to a persistent volume when released from its claim. - // +optional - PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy - // Name of StorageClass to which this persistent volume belongs. Empty value - // means that this volume does not belong to any StorageClass. - // +optional - StorageClassName string - // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will - // simply fail if one is invalid. - // +optional - MountOptions []string -} - -// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes -type PersistentVolumeReclaimPolicy string - -const ( - // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. - // The volume plugin must support Recycling. - PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" - // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. - // The volume plugin must support Deletion. - PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" - // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. - // The default policy is Retain. - PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" -) - -type PersistentVolumeStatus struct { - // Phase indicates if a volume is available, bound to a claim, or released by a claim - // +optional - Phase PersistentVolumePhase - // A human-readable message indicating details about why the volume is in this state. 
- // +optional - Message string - // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI - // +optional - Reason string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type PersistentVolumeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - Items []PersistentVolume -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PersistentVolumeClaim is a user's request for and claim to a persistent volume -type PersistentVolumeClaim struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the volume requested by a pod author - // +optional - Spec PersistentVolumeClaimSpec - - // Status represents the current information about a claim - // +optional - Status PersistentVolumeClaimStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type PersistentVolumeClaimList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - Items []PersistentVolumeClaim -} - -// PersistentVolumeClaimSpec describes the common attributes of storage devices -// and allows a Source for provider-specific attributes -type PersistentVolumeClaimSpec struct { - // Contains the types of access modes required - // +optional - AccessModes []PersistentVolumeAccessMode - // A label query over volumes to consider for binding. This selector is - // ignored when VolumeName is set - // +optional - Selector *metav1.LabelSelector - // Resources represents the minimum resources required - // +optional - Resources ResourceRequirements - // VolumeName is the binding reference to the PersistentVolume backing this - // claim. When set to non-empty value Selector is not evaluated - // +optional - VolumeName string - // Name of the StorageClass required by the claim. 
- // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 - // +optional - StorageClassName *string -} - -type PersistentVolumeClaimConditionType string - -// These are valid conditions of Pvc -const ( - // An user trigger resize of pvc has been started - PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" -) - -type PersistentVolumeClaimCondition struct { - Type PersistentVolumeClaimConditionType - Status ConditionStatus - // +optional - LastProbeTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -type PersistentVolumeClaimStatus struct { - // Phase represents the current phase of PersistentVolumeClaim - // +optional - Phase PersistentVolumeClaimPhase - // AccessModes contains all ways the volume backing the PVC can be mounted - // +optional - AccessModes []PersistentVolumeAccessMode - // Represents the actual resources of the underlying volume - // +optional - Capacity ResourceList - // +optional - Conditions []PersistentVolumeClaimCondition -} - -type PersistentVolumeAccessMode string - -const ( - // can be mounted read/write mode to exactly 1 host - ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" - // can be mounted in read-only mode to many hosts - ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" - // can be mounted in read/write mode to many hosts - ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" -) - -type PersistentVolumePhase string - -const ( - // used for PersistentVolumes that are not available - VolumePending PersistentVolumePhase = "Pending" - // used for PersistentVolumes that are not yet bound - // Available volumes are held by the binder and matched to PersistentVolumeClaims - VolumeAvailable PersistentVolumePhase = "Available" - // used for PersistentVolumes that are bound - VolumeBound PersistentVolumePhase = "Bound" - // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted - // released volumes must be recycled before becoming available again - // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource - VolumeReleased PersistentVolumePhase = "Released" - // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim - VolumeFailed PersistentVolumePhase = "Failed" -) - -type PersistentVolumeClaimPhase string - -const ( - // used for PersistentVolumeClaims that are not yet bound - ClaimPending PersistentVolumeClaimPhase = "Pending" - // used for PersistentVolumeClaims that are bound - ClaimBound PersistentVolumeClaimPhase = "Bound" - // used for PersistentVolumeClaims that lost their underlying - // PersistentVolume. The claim was bound to a PersistentVolume and this - // volume does not exist any longer and all data on it was lost. - ClaimLost PersistentVolumeClaimPhase = "Lost" -) - -type HostPathType string - -const ( - // For backwards compatible, leave it empty if unset - HostPathUnset HostPathType = "" - // If nothing exists at the given path, an empty directory will be created there - // as needed with file mode 0755, having the same group and ownership with Kubelet. 
- HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate" - // A directory must exist at the given path - HostPathDirectory HostPathType = "Directory" - // If nothing exists at the given path, an empty file will be created there - // as needed with file mode 0644, having the same group and ownership with Kubelet. - HostPathFileOrCreate HostPathType = "FileOrCreate" - // A file must exist at the given path - HostPathFile HostPathType = "File" - // A UNIX socket must exist at the given path - HostPathSocket HostPathType = "Socket" - // A character device must exist at the given path - HostPathCharDev HostPathType = "CharDevice" - // A block device must exist at the given path - HostPathBlockDev HostPathType = "BlockDevice" -) - -// Represents a host path mapped into a pod. -// Host path volumes do not support ownership management or SELinux relabeling. -type HostPathVolumeSource struct { - // If the path is a symlink, it will follow the link to the real path. - Path string - // Defaults to "" - Type *HostPathType -} - -// Represents an empty directory for a pod. -// Empty directory volumes support ownership management and SELinux relabeling. -type EmptyDirVolumeSource struct { - // TODO: Longer term we want to represent the selection of underlying - // media more like a scheduling problem - user says what traits they - // need, we give them a backing store that satisfies that. For now - // this will cover the most common needs. - // Optional: what type of storage medium should back this directory. - // The default is "" which means to use the node's default medium. - // +optional - Medium StorageMedium - // Total amount of local storage required for this EmptyDir volume. - // The size limit is also applicable for memory medium. - // The maximum usage on memory medium EmptyDir would be the minimum value between - // the SizeLimit specified here and the sum of memory limits of all containers in a pod. - // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir - // +optional - SizeLimit *resource.Quantity -} - -// StorageMedium defines ways that storage can be allocated to a volume. -type StorageMedium string - -const ( - StorageMediumDefault StorageMedium = "" // use whatever the default is for the node - StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) - StorageMediumHugePages StorageMedium = "HugePages" // use hugepages -) - -// Protocol defines network protocols supported for things like container ports. -type Protocol string - -const ( - // ProtocolTCP is the TCP protocol. - ProtocolTCP Protocol = "TCP" - // ProtocolUDP is the UDP protocol. - ProtocolUDP Protocol = "UDP" -) - -// Represents a Persistent Disk resource in Google Compute Engine. -// -// A GCE PD must exist before mounting to a container. The disk must -// also be in the same GCE project and zone as the kubelet. A GCE PD -// can only be mounted as read/write once or read-only many times. GCE -// PDs support ownership management and SELinux relabeling. -type GCEPersistentDiskVolumeSource struct { - // Unique name of the PD resource. Used to identify the disk in GCE - PDName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Partition on the disk to mount. 
- // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - // +optional - Partition int32 - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -type ISCSIVolumeSource struct { - // Required: iSCSI target portal - // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) - // +optional - TargetPortal string - // Required: target iSCSI Qualified Name - // +optional - IQN string - // Required: iSCSI target lun number - // +optional - Lun int32 - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. - // +optional - ISCSIInterface string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: list of iSCSI target portal ips for high availability. - // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) - // +optional - Portals []string - // Optional: whether support iSCSI Discovery CHAP authentication - // +optional - DiscoveryCHAPAuth bool - // Optional: whether support iSCSI Session CHAP authentication - // +optional - SessionCHAPAuth bool - // Optional: CHAP secret for iSCSI target and initiator authentication. - // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true - // +optional - SecretRef *LocalObjectReference - // Optional: Custom initiator name per volume. - // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - // : will be created for the connection. - // +optional - InitiatorName *string -} - -// Represents a Fibre Channel volume. -// Fibre Channel volumes can only be mounted as read/write once. -// Fibre Channel volumes support ownership management and SELinux relabeling. -type FCVolumeSource struct { - // Optional: FC target worldwide names (WWNs) - // +optional - TargetWWNs []string - // Optional: FC target lun number - // +optional - Lun *int32 - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: FC volume World Wide Identifiers (WWIDs) - // Either WWIDs or TargetWWNs and Lun must be set, but not both simultaneously. - // +optional - WWIDs []string -} - -// FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. -type FlexVolumeSource struct { - // Driver is the name of the driver to use for this volume. 
- Driver string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - // +optional - FSType string - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: Extra driver options if any. - // +optional - Options map[string]string -} - -// Represents a Persistent Disk resource in AWS. -// -// An AWS EBS disk must exist before mounting to a container. The disk -// must also be in the same AWS zone as the kubelet. An AWS EBS disk -// can only be mounted as read/write once. AWS EBS volumes support -// ownership management and SELinux relabeling. -type AWSElasticBlockStoreVolumeSource struct { - // Unique id of the persistent disk resource. Used to identify the disk in AWS - VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Partition on the disk to mount. - // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - // +optional - Partition int32 - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a volume that is populated with the contents of a git repository. -// Git repo volumes do not support ownership management. -// Git repo volumes support SELinux relabeling. -type GitRepoVolumeSource struct { - // Repository URL - Repository string - // Commit hash, this is optional - // +optional - Revision string - // Clone target, this is optional - // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - // git repository. Otherwise, if specified, the volume will contain the git repository in - // the subdirectory with the given name. - // +optional - Directory string - // TODO: Consider credentials here. -} - -// Adapts a Secret into a volume. -// -// The contents of the target Secret's Data field will be presented in a volume -// as files using the keys in the Data field as the file names. -// Secret volumes support ownership management and SELinux relabeling. -type SecretVolumeSource struct { - // Name of the secret in the pod's namespace to use. - // +optional - SecretName string - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. 
- // +optional - Items []KeyToPath - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool -} - -// Adapts a secret into a projected volume. -// -// The contents of the target Secret's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names. -// Note that this is identical to a secret volume source without the default -// mode. -type SecretProjection struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool -} - -// Represents an NFS mount that lasts the lifetime of a pod. -// NFS volumes do not support ownership management or SELinux relabeling. -type NFSVolumeSource struct { - // Server is the hostname or IP address of the NFS server - Server string - - // Path is the exported NFS share - Path string - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the NFS export to be mounted with read-only permissions - // +optional - ReadOnly bool -} - -// Represents a Quobyte mount that lasts the lifetime of a pod. -// Quobyte volumes do not support ownership management or SELinux relabeling. -type QuobyteVolumeSource struct { - // Registry represents a single or multiple Quobyte Registry services - // specified as a string as host:port pair (multiple entries are separated with commas) - // which acts as the central registry for volumes - Registry string - - // Volume is a string that references an already created Quobyte volume by name. - Volume string - - // Defaults to false (read/write). ReadOnly here will force - // the Quobyte to be mounted with read-only permissions - // +optional - ReadOnly bool - - // User to map volume access to - // Defaults to the root user - // +optional - User string - - // Group to map volume access to - // Default is no group - // +optional - Group string -} - -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -type GlusterfsVolumeSource struct { - // Required: EndpointsName is the endpoint name that details Glusterfs topology - EndpointsName string - - // Required: Path is the Glusterfs volume path - Path string - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the Glusterfs to be mounted with read-only permissions - // +optional - ReadOnly bool -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. 
-type RBDVolumeSource struct { - // Required: CephMonitors is a collection of Ceph monitors - CephMonitors []string - // Required: RBDImage is the rados image name - RBDImage string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: RadosPool is the rados pool name,default is rbd - // +optional - RBDPool string - // Optional: RBDUser is the rados user name, default is admin - // +optional - RadosUser string - // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring - // +optional - Keyring string - // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a cinder volume resource in Openstack. A Cinder volume -// must exist before mounting to a container. The volume must also be -// in the same region as the kubelet. Cinder volumes support ownership -// management and SELinux relabeling. -type CinderVolumeSource struct { - // Unique id of the volume used to identify the cinder volume - VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -type CephFSVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - Monitors []string - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - Path string - // Optional: User is the rados user name, default is admin - // +optional - User string - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // +optional - SecretFile string - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// SecretReference represents a Secret Reference. It has enough information to retrieve secret -// in any namespace -type SecretReference struct { - // Name is unique within a namespace to reference a secret resource. - // +optional - Name string - // Namespace defines the space within which the secret name must be unique. - // +optional - Namespace string -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. 
-type CephFSPersistentVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - Monitors []string - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - Path string - // Optional: User is the rados user name, default is admin - // +optional - User string - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // +optional - SecretFile string - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // +optional - SecretRef *SecretReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a Flocker volume mounted by the Flocker agent. -// One and only one of datasetName and datasetUUID should be set. -// Flocker volumes do not support ownership management or SELinux relabeling. -type FlockerVolumeSource struct { - // Name of the dataset stored as metadata -> name on the dataset for Flocker - // should be considered as deprecated - // +optional - DatasetName string - // UUID of the dataset. This is unique identifier of a Flocker dataset - // +optional - DatasetUUID string -} - -// Represents a volume containing downward API info. -// Downward API volumes support ownership management and SELinux relabeling. -type DownwardAPIVolumeSource struct { - // Items is a list of DownwardAPIVolume file - // +optional - Items []DownwardAPIVolumeFile - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 -} - -// Represents a single file containing information from the downward API -type DownwardAPIVolumeFile struct { - // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - Path string - // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported. - // +optional - FieldRef *ObjectFieldSelector - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - // +optional - ResourceFieldRef *ResourceFieldSelector - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - Mode *int32 -} - -// Represents downward API info for projecting into a projected volume. -// Note that this is identical to a downwardAPI volume source without the default -// mode. -type DownwardAPIProjection struct { - // Items is a list of DownwardAPIVolume file - // +optional - Items []DownwardAPIVolumeFile -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -type AzureFileVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key - SecretName string - // Share Name - ShareName string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. 
- // +optional - ReadOnly bool -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -type AzureFilePersistentVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key - SecretName string - // Share Name - ShareName string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // the namespace of the secret that contains Azure Storage Account Name and Key - // default is the same as the Pod - // +optional - SecretNamespace *string -} - -// Represents a vSphere volume resource. -type VsphereVirtualDiskVolumeSource struct { - // Path that identifies vSphere volume vmdk - VolumePath string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Storage Policy Based Management (SPBM) profile name. - // +optional - StoragePolicyName string - // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. - // +optional - StoragePolicyID string -} - -// Represents a Photon Controller persistent disk resource. -type PhotonPersistentDiskVolumeSource struct { - // ID that identifies Photon Controller persistent disk - PdID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - FSType string -} - -// PortworxVolumeSource represents a Portworx volume resource. -type PortworxVolumeSource struct { - // VolumeID uniquely identifies a Portworx volume - VolumeID string - // FSType represents the filesystem type to mount - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -type AzureDataDiskCachingMode string -type AzureDataDiskKind string - -const ( - AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" - AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" - AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" - - AzureSharedBlobDisk AzureDataDiskKind = "Shared" - AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated" - AzureManagedDisk AzureDataDiskKind = "Managed" -) - -// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. -type AzureDiskVolumeSource struct { - // The Name of the data disk in the blob storage - DiskName string - // The URI of the data disk in the blob storage - DataDiskURI string - // Host Caching mode: None, Read Only, Read Write. - // +optional - CachingMode *AzureDataDiskCachingMode - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType *string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly *bool - // Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). 
defaults to shared - Kind *AzureDataDiskKind -} - -// ScaleIOVolumeSource represents a persistent ScaleIO volume -type ScaleIOVolumeSource struct { - // The host address of the ScaleIO API Gateway. - Gateway string - // The name of the storage system as configured in ScaleIO. - System string - // SecretRef references to the secret for ScaleIO user and other - // sensitive information. If this is not provided, Login operation will fail. - SecretRef *LocalObjectReference - // Flag to enable/disable SSL communication with Gateway, default false - // +optional - SSLEnabled bool - // The name of the Protection Domain for the configured storage (defaults to "default"). - // +optional - ProtectionDomain string - // The Storage Pool associated with the protection domain (defaults to "default"). - // +optional - StoragePool string - // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). - // +optional - StorageMode string - // The name of a volume already created in the ScaleIO system - // that is associated with this volume source. - VolumeName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a StorageOS persistent volume resource. -type StorageOSVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - VolumeName string - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - VolumeNamespace string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - SecretRef *LocalObjectReference -} - -// Represents a StorageOS persistent volume resource. -type StorageOSPersistentVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - VolumeName string - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - VolumeNamespace string - // Filesystem type to mount. 
- // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - SecretRef *ObjectReference -} - -// Adapts a ConfigMap into a volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// volume as files using the keys in the Data field as the file names, unless -// the items element is populated with specific mappings of keys to paths. -// ConfigMap volumes support ownership management and SELinux relabeling. -type ConfigMapVolumeSource struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 - // Specify whether the ConfigMap or it's keys must be defined - // +optional - Optional *bool -} - -// Adapts a ConfigMap into a projected volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names, -// unless the items element is populated with specific mappings of keys to paths. -// Note that this is identical to a configmap volume source without the default -// mode. -type ConfigMapProjection struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Specify whether the ConfigMap or it's keys must be defined - // +optional - Optional *bool -} - -// Represents a projected volume source -type ProjectedVolumeSource struct { - // list of volume projections - Sources []VolumeProjection - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. 
- // +optional - DefaultMode *int32 -} - -// Projection that may be projected along with other supported volume types -type VolumeProjection struct { - // all types below are the supported types for projection into the same volume - - // information about the secret data to project - Secret *SecretProjection - // information about the downwardAPI data to project - DownwardAPI *DownwardAPIProjection - // information about the configMap data to project - ConfigMap *ConfigMapProjection -} - -// Maps a string key to a path within a volume. -type KeyToPath struct { - // The key to project. - Key string - - // The relative path of the file to map the key to. - // May not be an absolute path. - // May not contain the path element '..'. - // May not start with the string '..'. - Path string - // Optional: mode bits to use on this file, should be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - Mode *int32 -} - -// Local represents directly-attached storage with node affinity -type LocalVolumeSource struct { - // The full path to the volume on the node - // For alpha, this path must be a directory - // Once block as a source is supported, then this path can point to a block device - Path string -} - -// ContainerPort represents a network port in a single container -type ContainerPort struct { - // Optional: If specified, this must be an IANA_SVC_NAME Each named port - // in a pod must have a unique name. - // +optional - Name string - // Optional: If specified, this must be a valid port number, 0 < x < 65536. - // If HostNetwork is specified, this must match ContainerPort. - // +optional - HostPort int32 - // Required: This must be a valid port number, 0 < x < 65536. - ContainerPort int32 - // Required: Supports "TCP" and "UDP". - // +optional - Protocol Protocol - // Optional: What host IP to bind the external port to. - // +optional - HostIP string -} - -// VolumeMount describes a mounting of a Volume within a container. -type VolumeMount struct { - // Required: This must match the Name of a Volume [above]. - Name string - // Optional: Defaults to false (read-write). - // +optional - ReadOnly bool - // Required. Must not contain ':'. - MountPath string - // Path within the volume from which the container's volume should be mounted. - // Defaults to "" (volume's root). - // +optional - SubPath string - // mountPropagation determines how mounts are propagated from the host - // to container and the other way around. - // When not set, MountPropagationHostToContainer is used. - // This field is alpha in 1.8 and can be reworked or removed in a future - // release. - // +optional - MountPropagation *MountPropagationMode -} - -// MountPropagationMode describes mount propagation. -type MountPropagationMode string - -const ( - // MountPropagationHostToContainer means that the volume in a container will - // receive new mounts from the host or other containers, but filesystems - // mounted inside the container won't be propagated to the host or other - // containers. - // Note that this mode is recursively applied to all mounts in the volume - // ("rslave" in Linux terminology). 
-	MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
-	// MountPropagationBidirectional means that the volume in a container will
-	// receive new mounts from the host or other containers, and its own mounts
-	// will be propagated from the container to the host or other containers.
-	// Note that this mode is recursively applied to all mounts in the volume
-	// ("rshared" in Linux terminology).
-	MountPropagationBidirectional MountPropagationMode = "Bidirectional"
-)
-
-// EnvVar represents an environment variable present in a Container.
-type EnvVar struct {
-	// Required: This must be a C_IDENTIFIER.
-	Name string
-	// Optional: no more than one of the following may be specified.
-	// Optional: Defaults to ""; variable references $(VAR_NAME) are expanded
-	// using the previous defined environment variables in the container and
-	// any service environment variables. If a variable cannot be resolved,
-	// the reference in the input string will be unchanged. The $(VAR_NAME)
-	// syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
-	// references will never be expanded, regardless of whether the variable
-	// exists or not.
-	// +optional
-	Value string
-	// Optional: Specifies a source the value of this var should come from.
-	// +optional
-	ValueFrom *EnvVarSource
-}
-
-// EnvVarSource represents a source for the value of an EnvVar.
-// Only one of its fields may be set.
-type EnvVarSource struct {
-	// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
-	// metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
-	// +optional
-	FieldRef *ObjectFieldSelector
-	// Selects a resource of the container: only resources limits and requests
-	// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
-	// +optional
-	ResourceFieldRef *ResourceFieldSelector
-	// Selects a key of a ConfigMap.
-	// +optional
-	ConfigMapKeyRef *ConfigMapKeySelector
-	// Selects a key of a secret in the pod's namespace.
-	// +optional
-	SecretKeyRef *SecretKeySelector
-}
-
-// ObjectFieldSelector selects an APIVersioned field of an object.
-type ObjectFieldSelector struct {
-	// Required: Version of the schema the FieldPath is written in terms of.
-	// If no value is specified, it will be defaulted to the APIVersion of the
-	// enclosing object.
-	APIVersion string
-	// Required: Path of the field to select in the specified API version
-	FieldPath string
-}
-
-// ResourceFieldSelector represents container resources (cpu, memory) and their output format
-type ResourceFieldSelector struct {
-	// Container name: required for volumes, optional for env vars
-	// +optional
-	ContainerName string
-	// Required: resource to select
-	Resource string
-	// Specifies the output format of the exposed resources, defaults to "1"
-	// +optional
-	Divisor resource.Quantity
-}
-
-// Selects a key from a ConfigMap.
-type ConfigMapKeySelector struct {
-	// The ConfigMap to select from.
-	LocalObjectReference
-	// The key to select.
-	Key string
-	// Specify whether the ConfigMap or it's key must be defined
-	// +optional
-	Optional *bool
-}
-
-// SecretKeySelector selects a key of a Secret.
-type SecretKeySelector struct {
-	// The name of the secret in the pod's namespace to select from.
-	LocalObjectReference
-	// The key of the secret to select from. Must be a valid secret key.
- Key string - // Specify whether the Secret or it's key must be defined - // +optional - Optional *bool -} - -// EnvFromSource represents the source of a set of ConfigMaps -type EnvFromSource struct { - // An optional identifier to prepend to each key in the ConfigMap. - // +optional - Prefix string - // The ConfigMap to select from. - //+optional - ConfigMapRef *ConfigMapEnvSource - // The Secret to select from. - //+optional - SecretRef *SecretEnvSource -} - -// ConfigMapEnvSource selects a ConfigMap to populate the environment -// variables with. -// -// The contents of the target ConfigMap's Data field will represent the -// key-value pairs as environment variables. -type ConfigMapEnvSource struct { - // The ConfigMap to select from. - LocalObjectReference - // Specify whether the ConfigMap must be defined - // +optional - Optional *bool -} - -// SecretEnvSource selects a Secret to populate the environment -// variables with. -// -// The contents of the target Secret's Data field will represent the -// key-value pairs as environment variables. -type SecretEnvSource struct { - // The Secret to select from. - LocalObjectReference - // Specify whether the Secret must be defined - // +optional - Optional *bool -} - -// HTTPHeader describes a custom header to be used in HTTP probes -type HTTPHeader struct { - // The header field name - Name string - // The header field value - Value string -} - -// HTTPGetAction describes an action based on HTTP Get requests. -type HTTPGetAction struct { - // Optional: Path to access on the HTTP server. - // +optional - Path string - // Required: Name or number of the port to access on the container. - // +optional - Port intstr.IntOrString - // Optional: Host name to connect to, defaults to the pod IP. You - // probably want to set "Host" in httpHeaders instead. - // +optional - Host string - // Optional: Scheme to use for connecting to the host, defaults to HTTP. - // +optional - Scheme URIScheme - // Optional: Custom headers to set in the request. HTTP allows repeated headers. - // +optional - HTTPHeaders []HTTPHeader -} - -// URIScheme identifies the scheme used for connection to a host for Get actions -type URIScheme string - -const ( - // URISchemeHTTP means that the scheme used will be http:// - URISchemeHTTP URIScheme = "HTTP" - // URISchemeHTTPS means that the scheme used will be https:// - URISchemeHTTPS URIScheme = "HTTPS" -) - -// TCPSocketAction describes an action based on opening a socket -type TCPSocketAction struct { - // Required: Port to connect to. - // +optional - Port intstr.IntOrString - // Optional: Host name to connect to, defaults to the pod IP. - // +optional - Host string -} - -// ExecAction describes a "run in container" action. -type ExecAction struct { - // Command is the command line to execute inside the container, the working directory for the - // command is root ('/') in the container's filesystem. The command is simply exec'd, it is - // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - // a shell, you need to explicitly call out to that shell. - // +optional - Command []string -} - -// Probe describes a health check to be performed against a container to determine whether it is -// alive or ready to receive traffic. -type Probe struct { - // The action taken to determine the health of a container - Handler - // Length of time before health checking is activated. In seconds. - // +optional - InitialDelaySeconds int32 - // Length of time before health checking times out. 
In seconds. - // +optional - TimeoutSeconds int32 - // How often (in seconds) to perform the probe. - // +optional - PeriodSeconds int32 - // Minimum consecutive successes for the probe to be considered successful after having failed. - // Must be 1 for liveness. - // +optional - SuccessThreshold int32 - // Minimum consecutive failures for the probe to be considered failed after having succeeded. - // +optional - FailureThreshold int32 -} - -// PullPolicy describes a policy for if/when to pull a container image -type PullPolicy string - -const ( - // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - PullAlways PullPolicy = "Always" - // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present - PullNever PullPolicy = "Never" - // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. - PullIfNotPresent PullPolicy = "IfNotPresent" -) - -// TerminationMessagePolicy describes how termination messages are retrieved from a container. -type TerminationMessagePolicy string - -const ( - // TerminationMessageReadFile is the default behavior and will set the container status message to - // the contents of the container's terminationMessagePath when the container exits. - TerminationMessageReadFile TerminationMessagePolicy = "File" - // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs - // for the container status message when the container exits with an error and the - // terminationMessagePath has no contents. - TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" -) - -// Capability represent POSIX capabilities type -type Capability string - -// Capabilities represent POSIX capabilities that can be added or removed to a running container. -type Capabilities struct { - // Added capabilities - // +optional - Add []Capability - // Removed capabilities - // +optional - Drop []Capability -} - -// ResourceRequirements describes the compute resource requirements. -type ResourceRequirements struct { - // Limits describes the maximum amount of compute resources allowed. - // +optional - Limits ResourceList - // Requests describes the minimum amount of compute resources required. - // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value - // +optional - Requests ResourceList -} - -// Container represents a single container that is expected to be run on the host. -type Container struct { - // Required: This must be a DNS_LABEL. Each container in a pod must - // have a unique name. - Name string - // Required. - Image string - // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // +optional - Command []string - // Optional: The docker image's cmd is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // +optional - Args []string - // Optional: Defaults to Docker's default. - // +optional - WorkingDir string - // +optional - Ports []ContainerPort - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +optional - EnvFrom []EnvFromSource - // +optional - Env []EnvVar - // Compute resource requirements. - // +optional - Resources ResourceRequirements - // +optional - VolumeMounts []VolumeMount - // +optional - LivenessProbe *Probe - // +optional - ReadinessProbe *Probe - // +optional - Lifecycle *Lifecycle - // Required. - // +optional - TerminationMessagePath string - // +optional - TerminationMessagePolicy TerminationMessagePolicy - // Required: Policy for pulling images for this container - ImagePullPolicy PullPolicy - // Optional: SecurityContext defines the security options the container should be run with. - // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - // +optional - SecurityContext *SecurityContext - - // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) - // and shouldn't be used for general purpose containers. - // +optional - Stdin bool - // +optional - StdinOnce bool - // +optional - TTY bool -} - -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -type Handler struct { - // One and only one of the following should be specified. - // Exec specifies the action to take. - // +optional - Exec *ExecAction - // HTTPGet specifies the http request to perform. - // +optional - HTTPGet *HTTPGetAction - // TCPSocket specifies an action involving a TCP port. - // TODO: implement a realistic TCP lifecycle hook - // +optional - TCPSocket *TCPSocketAction -} - -// Lifecycle describes actions that the management system should take in response to container lifecycle -// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks -// until the action is complete, unless the container process fails, in which case the handler is aborted. -type Lifecycle struct { - // PostStart is called immediately after a container is created. If the handler fails, the container - // is terminated and restarted. - // +optional - PostStart *Handler - // PreStop is called immediately before a container is terminated. The reason for termination is - // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. - // +optional - PreStop *Handler -} - -// The below types are used by kube_client and api_server. - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; -// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. 
In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -type ContainerStateWaiting struct { - // A brief CamelCase string indicating details about why the container is in waiting state. - // +optional - Reason string - // A human-readable message indicating details about why the container is in waiting state. - // +optional - Message string -} - -type ContainerStateRunning struct { - // +optional - StartedAt metav1.Time -} - -type ContainerStateTerminated struct { - ExitCode int32 - // +optional - Signal int32 - // +optional - Reason string - // +optional - Message string - // +optional - StartedAt metav1.Time - // +optional - FinishedAt metav1.Time - // +optional - ContainerID string -} - -// ContainerState holds a possible state of container. -// Only one of its members may be specified. -// If none of them is specified, the default one is ContainerStateWaiting. -type ContainerState struct { - // +optional - Waiting *ContainerStateWaiting - // +optional - Running *ContainerStateRunning - // +optional - Terminated *ContainerStateTerminated -} - -type ContainerStatus struct { - // Each container in a pod must have a unique name. - Name string - // +optional - State ContainerState - // +optional - LastTerminationState ContainerState - // Ready specifies whether the container has passed its readiness check. - Ready bool - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. - RestartCount int32 - Image string - ImageID string - // +optional - ContainerID string -} - -// PodPhase is a label for the condition of a pod at the current time. -type PodPhase string - -// These are the valid statuses of pods. -const ( - // PodPending means the pod has been accepted by the system, but one or more of the containers - // has not been started. This includes time before being bound to a node, as well as time spent - // pulling images onto the host. - PodPending PodPhase = "Pending" - // PodRunning means the pod has been bound to a node and all of the containers have been started. - // At least one container is still running or is in the process of being restarted. - PodRunning PodPhase = "Running" - // PodSucceeded means that all containers in the pod have voluntarily terminated - // with a container exit code of 0, and the system is not going to restart any of these containers. - PodSucceeded PodPhase = "Succeeded" - // PodFailed means that all containers in the pod have terminated, and at least one container has - // terminated in a failure (exited with a non-zero exit code or was stopped by the system). - PodFailed PodPhase = "Failed" - // PodUnknown means that for some reason the state of the pod could not be obtained, typically due - // to an error in communicating with the host of the pod. - PodUnknown PodPhase = "Unknown" -) - -type PodConditionType string - -// These are valid conditions of pod. -const ( - // PodScheduled represents status of the scheduling process for this pod. - PodScheduled PodConditionType = "PodScheduled" - // PodReady means the pod is able to service requests and should be added to the - // load balancing pools of all matching services. - PodReady PodConditionType = "Ready" - // PodInitialized means that all init containers in the pod have started successfully. 
- PodInitialized PodConditionType = "Initialized" - // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler - // can't schedule the pod right now, for example due to insufficient resources in the cluster. - PodReasonUnschedulable = "Unschedulable" -) - -type PodCondition struct { - Type PodConditionType - Status ConditionStatus - // +optional - LastProbeTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -// RestartPolicy describes how the container should be restarted. -// Only one of the following restart policies may be specified. -// If none of the following policies is specified, the default one -// is RestartPolicyAlways. -type RestartPolicy string - -const ( - RestartPolicyAlways RestartPolicy = "Always" - RestartPolicyOnFailure RestartPolicy = "OnFailure" - RestartPolicyNever RestartPolicy = "Never" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodList is a list of Pods. -type PodList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Pod -} - -// DNSPolicy defines how a pod's DNS will be configured. -type DNSPolicy string - -const ( - // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS - // first, if it is available, then fall back on the default - // (as determined by kubelet) DNS settings. - DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" - - // DNSClusterFirst indicates that the pod should use cluster DNS - // first unless hostNetwork is true, if it is available, then - // fall back on the default (as determined by kubelet) DNS settings. - DNSClusterFirst DNSPolicy = "ClusterFirst" - - // DNSDefault indicates that the pod should use the default (as - // determined by kubelet) DNS settings. - DNSDefault DNSPolicy = "Default" -) - -// A node selector represents the union of the results of one or more label queries -// over a set of nodes; that is, it represents the OR of the selectors represented -// by the node selector terms. -type NodeSelector struct { - //Required. A list of node selector terms. The terms are ORed. - NodeSelectorTerms []NodeSelectorTerm -} - -// A null or empty node selector term matches no objects. -type NodeSelectorTerm struct { - //Required. A list of node selector requirements. The requirements are ANDed. - MatchExpressions []NodeSelectorRequirement -} - -// A node selector requirement is a selector that contains values, a key, and an operator -// that relates the key and values. -type NodeSelectorRequirement struct { - // The label key that the selector applies to. - Key string - // Represents a key's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - Operator NodeSelectorOperator - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. If the operator is Gt or Lt, the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. - // +optional - Values []string -} - -// A node selector operator is the set of operators that can be used in -// a node selector requirement. 
-type NodeSelectorOperator string - -const ( - NodeSelectorOpIn NodeSelectorOperator = "In" - NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" - NodeSelectorOpExists NodeSelectorOperator = "Exists" - NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" - NodeSelectorOpGt NodeSelectorOperator = "Gt" - NodeSelectorOpLt NodeSelectorOperator = "Lt" -) - -// Affinity is a group of affinity scheduling rules. -type Affinity struct { - // Describes node affinity scheduling rules for the pod. - // +optional - NodeAffinity *NodeAffinity - // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAffinity *PodAffinity - // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAntiAffinity *PodAntiAffinity -} - -// Pod affinity is a group of inter pod affinity scheduling rules. -type PodAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm - - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm -} - -// Pod anti affinity is a group of inter pod anti affinity scheduling rules. -type PodAntiAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. 
due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm - - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm - // The scheduler will prefer to schedule pods to nodes that satisfy - // the anti-affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm -} - -// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) -type WeightedPodAffinityTerm struct { - // weight associated with matching the corresponding podAffinityTerm, - // in the range 1-100. - Weight int32 - // Required. A pod affinity term, associated with the corresponding weight. - PodAffinityTerm PodAffinityTerm -} - -// Defines a set of pods (namely those matching the labelSelector -// relative to the given namespace(s)) that this pod should be -// co-located (affinity) or not co-located (anti-affinity) with, -// where co-located is defined as running on a node whose value of -// the label with key matches that of any node on which -// a pod of the set of pods is running. -type PodAffinityTerm struct { - // A label query over a set of resources, in this case pods. - // +optional - LabelSelector *metav1.LabelSelector - // namespaces specifies which namespaces the labelSelector applies to (matches against); - // null or empty list means "this pod's namespace" - Namespaces []string - // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - // the labelSelector in the specified namespaces, where co-located is defined as running on a node - // whose value of the label with key topologyKey matches that of any node on which any of the - // selected pods is running. - // Empty topologyKey is not allowed. - // +optional - TopologyKey string -} - -// Node affinity is a group of node affinity scheduling rules. -type NodeAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. 
-	// If the affinity requirements specified by this field cease to be met
-	// at some point during pod execution (e.g. due to an update), the system
-	// will try to eventually evict the pod from its node.
-	// +optional
-	// RequiredDuringSchedulingRequiredDuringExecution *NodeSelector
-
-	// If the affinity requirements specified by this field are not met at
-	// scheduling time, the pod will not be scheduled onto the node.
-	// If the affinity requirements specified by this field cease to be met
-	// at some point during pod execution (e.g. due to an update), the system
-	// may or may not try to eventually evict the pod from its node.
-	// +optional
-	RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector
-	// The scheduler will prefer to schedule pods to nodes that satisfy
-	// the affinity expressions specified by this field, but it may choose
-	// a node that violates one or more of the expressions. The node that is
-	// most preferred is the one with the greatest sum of weights, i.e.
-	// for each node that meets all of the scheduling requirements (resource
-	// request, requiredDuringScheduling affinity expressions, etc.),
-	// compute a sum by iterating through the elements of this field and adding
-	// "weight" to the sum if the node matches the corresponding matchExpressions; the
-	// node(s) with the highest sum are the most preferred.
-	// +optional
-	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm
-}
-
-// An empty preferred scheduling term matches all objects with implicit weight 0
-// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
-type PreferredSchedulingTerm struct {
-	// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
-	Weight int32
-	// A node selector term, associated with the corresponding weight.
-	Preference NodeSelectorTerm
-}
-
-// The node this Taint is attached to has the "effect" on
-// any pod that does not tolerate the Taint.
-type Taint struct {
-	// Required. The taint key to be applied to a node.
-	Key string
-	// Required. The taint value corresponding to the taint key.
-	// +optional
-	Value string
-	// Required. The effect of the taint on pods
-	// that do not tolerate the taint.
-	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
-	Effect TaintEffect
-	// TimeAdded represents the time at which the taint was added.
-	// It is only written for NoExecute taints.
-	// +optional
-	TimeAdded metav1.Time
-}
-
-type TaintEffect string
-
-const (
-	// Do not allow new pods to schedule onto the node unless they tolerate the taint,
-	// but allow all pods submitted to Kubelet without going through the scheduler
-	// to start, and allow all already-running pods to continue running.
-	// Enforced by the scheduler.
-	TaintEffectNoSchedule TaintEffect = "NoSchedule"
-	// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
-	// new pods onto the node, rather than prohibiting new pods from scheduling
-	// onto the node entirely. Enforced by the scheduler.
-	TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
-	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
-	// Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
-	// Kubelet without going through the scheduler to start.
-	// Enforced by Kubelet and the scheduler.
-	// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
-
-	// Evict any already-running pods that do not tolerate the taint.
- // Currently enforced by NodeController. - TaintEffectNoExecute TaintEffect = "NoExecute" -) - -// The pod this Toleration is attached to tolerates any taint that matches -// the triple using the matching operator . -type Toleration struct { - // Key is the taint key that the toleration applies to. Empty means match all taint keys. - // If the key is empty, operator must be Exists; this combination means to match all values and all keys. - // +optional - Key string - // Operator represents a key's relationship to the value. - // Valid operators are Exists and Equal. Defaults to Equal. - // Exists is equivalent to wildcard for value, so that a pod can - // tolerate all taints of a particular category. - // +optional - Operator TolerationOperator - // Value is the taint value the toleration matches to. - // If the operator is Exists, the value should be empty, otherwise just a regular string. - // +optional - Value string - // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - // +optional - Effect TaintEffect - // TolerationSeconds represents the period of time the toleration (which must be - // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - // it is not set, which means tolerate the taint forever (do not evict). Zero and - // negative values will be treated as 0 (evict immediately) by the system. - // +optional - TolerationSeconds *int64 -} - -// A toleration operator is the set of operators that can be used in a toleration. -type TolerationOperator string - -const ( - TolerationOpExists TolerationOperator = "Exists" - TolerationOpEqual TolerationOperator = "Equal" -) - -// PodSpec is a description of a pod -type PodSpec struct { - Volumes []Volume - // List of initialization containers belonging to the pod. - InitContainers []Container - // List of containers belonging to the pod. - Containers []Container - // +optional - RestartPolicy RestartPolicy - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates delete immediately. - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // +optional - TerminationGracePeriodSeconds *int64 - // Optional duration in seconds relative to the StartTime that the pod may be active on a node - // before the system actively tries to terminate the pod; value must be positive integer - // +optional - ActiveDeadlineSeconds *int64 - // Required: Set DNS policy. - // +optional - DNSPolicy DNSPolicy - // NodeSelector is a selector which must be true for the pod to fit on a node - // +optional - NodeSelector map[string]string - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod - // The pod will be allowed to use secrets referenced by the ServiceAccount - ServiceAccountName string - // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - // +optional - AutomountServiceAccountToken *bool - - // NodeName is a request to schedule this pod onto a specific node. 
If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - // +optional - NodeName string - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. - // +optional - SecurityContext *PodSecurityContext - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. - // +optional - ImagePullSecrets []LocalObjectReference - // Specifies the hostname of the Pod. - // If not specified, the pod's hostname will be set to a system-defined value. - // +optional - Hostname string - // If specified, the fully qualified Pod hostname will be "...svc.". - // If not specified, the pod will not have a domainname at all. - // +optional - Subdomain string - // If specified, the pod's scheduling constraints - // +optional - Affinity *Affinity - // If specified, the pod will be dispatched by specified scheduler. - // If not specified, the pod will be dispatched by default scheduler. - // +optional - SchedulerName string - // If specified, the pod's tolerations. - // +optional - Tolerations []Toleration - // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - // file if specified. This is only valid for non-hostNetwork pods. - // +optional - HostAliases []HostAlias - // If specified, indicates the pod's priority. "SYSTEM" is a special keyword - // which indicates the highest priority. Any other name must be defined by - // creating a PriorityClass object with that name. - // If not specified, the pod priority will be default or zero if there is no - // default. - // +optional - PriorityClassName string - // The priority value. Various system components use this field to find the - // priority of the pod. When Priority Admission Controller is enabled, it - // prevents users from setting this field. The admission controller populates - // this field from PriorityClassName. - // The higher the value, the higher the priority. - // +optional - Priority *int32 -} - -// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the -// pod's hosts file. -type HostAlias struct { - IP string - Hostnames []string -} - -// Sysctl defines a kernel parameter to be set -type Sysctl struct { - // Name of a property to set - Name string - // Value of a property to set - Value string -} - -// PodSecurityContext holds pod-level security attributes and common container settings. -// Some fields are also present in container.securityContext. Field values of -// container.securityContext take precedence over field values of PodSecurityContext. -type PodSecurityContext struct { - // Use the host's network namespace. If this option is set, the ports that will be - // used must be specified. - // Optional: Default to false - // +k8s:conversion-gen=false - // +optional - HostNetwork bool - // Use the host's pid namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostPID bool - // Use the host's ipc namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostIPC bool - // The SELinux context to be applied to all containers. 
- // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in SecurityContext. If set in - // both SecurityContext and PodSecurityContext, the value specified in SecurityContext - // takes precedence for that container. - // +optional - SELinuxOptions *SELinuxOptions - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - // +optional - RunAsUser *int64 - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsNonRoot *bool - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. - // +optional - SupplementalGroups []int64 - // A special supplemental group that applies to all containers in a pod. - // Some volume types allow the Kubelet to change the ownership of that volume - // to be owned by the pod: - // - // 1. The owning GID will be the FSGroup - // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - // 3. The permission bits are OR'd with rw-rw---- - // - // If unset, the Kubelet will not modify the ownership and permissions of any volume. - // +optional - FSGroup *int64 -} - -// PodQOSClass defines the supported qos classes of Pods. -type PodQOSClass string - -const ( - // PodQOSGuaranteed is the Guaranteed qos class. - PodQOSGuaranteed PodQOSClass = "Guaranteed" - // PodQOSBurstable is the Burstable qos class. - PodQOSBurstable PodQOSClass = "Burstable" - // PodQOSBestEffort is the BestEffort qos class. - PodQOSBestEffort PodQOSClass = "BestEffort" -) - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. -type PodStatus struct { - // +optional - Phase PodPhase - // +optional - Conditions []PodCondition - // A human readable message indicating details about why the pod is in this state. - // +optional - Message string - // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted' - // +optional - Reason string - - // +optional - HostIP string - // +optional - PodIP string - - // Date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. - // +optional - StartTime *metav1.Time - // +optional - QOSClass PodQOSClass - - // The list has one entry per init container in the manifest. The most recent successful - // init container will have ready = true, the most recently started container will have - // startTime set. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status - InitContainerStatuses []ContainerStatus - // The list has one entry per container in the manifest. Each entry is - // currently the output of `docker inspect`. 
This output format is *not* - // final and should not be relied upon. - // TODO: Make real decisions about what our info should look like. Re-enable fuzz test - // when we have done this. - // +optional - ContainerStatuses []ContainerStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded -type PodStatusResult struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - // Status represents the current information about a pod. This data may not be up - // to date. - // +optional - Status PodStatus -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Pod is a collection of containers, used as either input (create, update) or as output (list, get). -type Pod struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a pod. - // +optional - Spec PodSpec - - // Status represents the current information about a pod. This data may not be up - // to date. - // +optional - Status PodStatus -} - -// PodTemplateSpec describes the data a pod should have when created from a template -type PodTemplateSpec struct { - // Metadata of the pods created from this template. - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a pod. - // +optional - Spec PodSpec -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTemplate describes a template for creating copies of a predefined pod. -type PodTemplate struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Template defines the pods that will be created from this pod template - // +optional - Template PodTemplateSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTemplateList is a list of PodTemplates. -type PodTemplateList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []PodTemplate -} - -// ReplicationControllerSpec is the specification of a replication controller. -// As the internal representation of a replication controller, it may have either -// a TemplateRef or a Template set. -type ReplicationControllerSpec struct { - // Replicas is the number of desired replicas. - Replicas int32 - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 - - // Selector is a label query over pods that should match the Replicas count. - Selector map[string]string - - // TemplateRef is a reference to an object that describes the pod that will be created if - // insufficient replicas are detected. This reference is ignored if a Template is set. - // Must be set before converting to a versioned API object - // +optional - //TemplateRef *ObjectReference - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Internally, this takes precedence over a - // TemplateRef. - // +optional - Template *PodTemplateSpec -} - -// ReplicationControllerStatus represents the current status of a replication -// controller. -type ReplicationControllerStatus struct { - // Replicas is the number of actual replicas. - Replicas int32 - - // The number of pods that have labels matching the labels of the pod template of the replication controller. 
- // +optional - FullyLabeledReplicas int32 - - // The number of ready replicas for this replication controller. - // +optional - ReadyReplicas int32 - - // The number of available replicas (ready for at least minReadySeconds) for this replication controller. - // +optional - AvailableReplicas int32 - - // ObservedGeneration is the most recent generation observed by the controller. - // +optional - ObservedGeneration int64 - - // Represents the latest available observations of a replication controller's current state. - // +optional - Conditions []ReplicationControllerCondition -} - -type ReplicationControllerConditionType string - -// These are valid conditions of a replication controller. -const ( - // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods - // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, - // etc. or deleted due to kubelet being down or finalizers are failing. - ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" -) - -// ReplicationControllerCondition describes the state of a replication controller at a certain point. -type ReplicationControllerCondition struct { - // Type of replication controller condition. - Type ReplicationControllerConditionType - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus - // The last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - // +optional - Reason string - // A human readable message indicating details about the transition. - // +optional - Message string -} - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/extensions.Scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicationController represents the configuration of a replication controller. -type ReplicationController struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired behavior of this replication controller. - // +optional - Spec ReplicationControllerSpec - - // Status is the current status of this replication controller. This data may be - // out of date by some window of time. - // +optional - Status ReplicationControllerStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicationControllerList is a collection of replication controllers. -type ReplicationControllerList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ReplicationController -} - -const ( - // ClusterIPNone - do not assign a cluster IP - // no proxying required and no environment variables should be created for pods - ClusterIPNone = "None" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceList holds a list of services. -type ServiceList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Service -} - -// Session Affinity Type string -type ServiceAffinity string - -const ( - // ServiceAffinityClientIP is the Client IP based. - ServiceAffinityClientIP ServiceAffinity = "ClientIP" - - // ServiceAffinityNone - no session affinity. 
- ServiceAffinityNone ServiceAffinity = "None" -) - -const ( - // DefaultClientIPServiceAffinitySeconds is the default timeout seconds - // of Client IP based session affinity - 3 hours. - DefaultClientIPServiceAffinitySeconds int32 = 10800 - // MaxClientIPServiceAffinitySeconds is the max timeout seconds - // of Client IP based session affinity - 1 day. - MaxClientIPServiceAffinitySeconds int32 = 86400 -) - -// SessionAffinityConfig represents the configurations of session affinity. -type SessionAffinityConfig struct { - // clientIP contains the configurations of Client IP based session affinity. - // +optional - ClientIP *ClientIPConfig -} - -// ClientIPConfig represents the configurations of Client IP based session affinity. -type ClientIPConfig struct { - // timeoutSeconds specifies the seconds of ClientIP type session sticky time. - // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". - // Default value is 10800(for 3 hours). - // +optional - TimeoutSeconds *int32 -} - -// Service Type string describes ingress methods for a service -type ServiceType string - -const ( - // ServiceTypeClusterIP means a service will only be accessible inside the - // cluster, via the ClusterIP. - ServiceTypeClusterIP ServiceType = "ClusterIP" - - // ServiceTypeNodePort means a service will be exposed on one port of - // every node, in addition to 'ClusterIP' type. - ServiceTypeNodePort ServiceType = "NodePort" - - // ServiceTypeLoadBalancer means a service will be exposed via an - // external load balancer (if the cloud provider supports it), in addition - // to 'NodePort' type. - ServiceTypeLoadBalancer ServiceType = "LoadBalancer" - - // ServiceTypeExternalName means a service consists of only a reference to - // an external name that kubedns or equivalent will return as a CNAME - // record, with no exposing or proxying of any pods involved. - ServiceTypeExternalName ServiceType = "ExternalName" -) - -// Service External Traffic Policy Type string -type ServiceExternalTrafficPolicyType string - -const ( - // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. - ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" - // ServiceExternalTrafficPolicyTypeCluster specifies cluster-wide (legacy) behavior. - ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" -) - -// ServiceStatus represents the current status of a service -type ServiceStatus struct { - // LoadBalancer contains the current status of the load-balancer, - // if one is present. - // +optional - LoadBalancer LoadBalancerStatus -} - -// LoadBalancerStatus represents the status of a load-balancer -type LoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer; - // traffic intended for the service should be sent to these ingress points. - // +optional - Ingress []LoadBalancerIngress -} - -// LoadBalancerIngress represents the status of a load-balancer ingress point: -// traffic intended for the service should be sent to an ingress point. 
-type LoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based - // (typically GCE or OpenStack load-balancers) - // +optional - IP string - - // Hostname is set for load-balancer ingress points that are DNS based - // (typically AWS load-balancers) - // +optional - Hostname string -} - -// ServiceSpec describes the attributes that a user creates on a service -type ServiceSpec struct { - // Type determines how the Service is exposed. Defaults to ClusterIP. Valid - // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. - // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/ - // +optional - Type ServiceType - - // Required: The list of ports that are exposed by this service. - Ports []ServicePort - - // Route service traffic to pods with label keys and values matching this - // selector. If empty or not present, the service is assumed to have an - // external process managing its endpoints, which Kubernetes will not - // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. - // Ignored if type is ExternalName. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/ - Selector map[string]string - - // ClusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - // +optional - ClusterIP string - - // ExternalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. - ExternalName string - - // ExternalIPs are used by external load balancers, or can be set by - // users to handle external traffic that arrives at a node. - // +optional - ExternalIPs []string - - // Only applies to Service Type: LoadBalancer - // LoadBalancer will get created with the IP specified in this field. - // This feature depends on whether the underlying cloud-provider supports specifying - // the loadBalancerIP when a load balancer is created. - // This field will be ignored if the cloud-provider does not support the feature. - // +optional - LoadBalancerIP string - - // Optional: Supports "ClientIP" and "None". Used to maintain session affinity. 
- // +optional
- SessionAffinity ServiceAffinity
-
- // sessionAffinityConfig contains the configurations of session affinity.
- // +optional
- SessionAffinityConfig *SessionAffinityConfig
-
- // Optional: If specified and supported by the platform, traffic through the cloud-provider
- // load-balancer will be restricted to the specified client IPs. This field will be ignored if the
- // cloud-provider does not support the feature.
- // +optional
- LoadBalancerSourceRanges []string
-
- // externalTrafficPolicy denotes if this Service desires to route external
- // traffic to node-local or cluster-wide endpoints. "Local" preserves the
- // client source IP and avoids a second hop for LoadBalancer and NodePort
- // type services, but risks potentially imbalanced traffic spreading.
- // "Cluster" obscures the client source IP and may cause a second hop to
- // another node, but should have good overall load-spreading.
- // +optional
- ExternalTrafficPolicy ServiceExternalTrafficPolicyType
-
- // healthCheckNodePort specifies the healthcheck nodePort for the service.
- // If not specified, HealthCheckNodePort is created by the service api
- // backend with the allocated nodePort. Will use user-specified nodePort value
- // if specified by the client. Only takes effect when Type is set to LoadBalancer
- // and ExternalTrafficPolicy is set to Local.
- // +optional
- HealthCheckNodePort int32
-
- // publishNotReadyAddresses, when set to true, indicates that DNS implementations
- // must publish the notReadyAddresses of subsets for the Endpoints associated with
- // the Service. The default value is false.
- // The primary use case for setting this field is to use a StatefulSet's Headless Service
- // to propagate SRV records for its Pods without respect to their readiness for the purpose
- // of peer discovery.
- // This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints
- // annotation when that annotation is deprecated and all clients have been converted to use this
- // field.
- // +optional
- PublishNotReadyAddresses bool
-}
-
-type ServicePort struct {
- // Optional if only one ServicePort is defined on this service: The
- // name of this port within the service. This must be a DNS_LABEL.
- // All ports within a ServiceSpec must have unique names. This maps to
- // the 'Name' field in EndpointPort objects.
- Name string
-
- // The IP protocol for this port. Supports "TCP" and "UDP".
- Protocol Protocol
-
- // The port that will be exposed on the service.
- Port int32
-
- // Optional: The target port on pods selected by this service. If this
- // is a string, it will be looked up as a named port in the target
- // Pod's container ports. If this is not specified, the value
- // of the 'port' field is used (an identity map).
- // This field is ignored for services with clusterIP=None, and should be
- // omitted or set equal to the 'port' field.
- TargetPort intstr.IntOrString
-
- // The port on each node on which this service is exposed.
- // Default is to auto-allocate a port if the ServiceType of this Service requires one.
- NodePort int32
-}
-
-// +genclient
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Service is a named abstraction of software service (for example, mysql) consisting of local port
-// (for example 3306) that the proxy listens on, and the selector that determines which pods
-// will answer requests sent through the proxy.
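// Illustrative sketch, not part of the vendored file: the ServiceSpec and
// ServicePort types above could be populated like this for a minimal ClusterIP
// service in front of pods labelled app=mysql (the Service type that wraps the
// spec is defined just below). The concrete names and numbers are assumptions.
func exampleMySQLServiceSpec() ServiceSpec {
	return ServiceSpec{
		Type:     ServiceTypeClusterIP,
		Selector: map[string]string{"app": "mysql"},
		Ports: []ServicePort{{
			Name:       "mysql",
			Protocol:   "TCP", // Protocol is a string-backed type; "TCP" and "UDP" are supported
			Port:       3306,
			TargetPort: intstr.FromInt(3306), // resolved against the selected pods' container ports
		}},
	}
}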
-type Service struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a service. - // +optional - Spec ServiceSpec - - // Status represents the current status of a service. - // +optional - Status ServiceStatus -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceAccount binds together: -// * a name, understood by users, and perhaps by peripheral systems, for an identity -// * a principal that can be authenticated and authorized -// * a set of secrets -type ServiceAccount struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount - Secrets []ObjectReference - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // +optional - ImagePullSecrets []LocalObjectReference - - // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. - // Can be overridden at the pod level. - // +optional - AutomountServiceAccountToken *bool -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceAccountList is a list of ServiceAccount objects -type ServiceAccountList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ServiceAccount -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] -type Endpoints struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // The set of all endpoints is the union of all subsets. - Subsets []EndpointSubset -} - -// EndpointSubset is a group of addresses with a common set of ports. The -// expanded set of endpoints is the Cartesian product of Addresses x Ports. -// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } -// The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] -type EndpointSubset struct { - Addresses []EndpointAddress - NotReadyAddresses []EndpointAddress - Ports []EndpointPort -} - -// EndpointAddress is a tuple that describes single IP address. -type EndpointAddress struct { - // The IP of this endpoint. - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, see #4447. - IP string - // Optional: Hostname of this endpoint - // Meant to be used by DNS servers etc. - // +optional - Hostname string - // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. - // +optional - NodeName *string - // Optional: The kubernetes object related to the entry point. 
- TargetRef *ObjectReference -} - -// EndpointPort is a tuple that describes a single port. -type EndpointPort struct { - // The name of this port (corresponds to ServicePort.Name). Optional - // if only one port is defined. Must be a DNS_LABEL. - Name string - - // The port number. - Port int32 - - // The IP protocol for this port. - Protocol Protocol -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// EndpointsList is a list of endpoints. -type EndpointsList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Endpoints -} - -// NodeSpec describes the attributes that a node is created with. -type NodeSpec struct { - // PodCIDR represents the pod IP range assigned to the node - // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs. - // +optional - PodCIDR string - - // External ID of the node assigned by some machine database (e.g. a cloud provider) - // +optional - ExternalID string - - // ID of the node assigned by the cloud provider - // Note: format is "://" - // +optional - ProviderID string - - // Unschedulable controls node schedulability of new pods. By default node is schedulable. - // +optional - Unschedulable bool - - // If specified, the node's taints. - // +optional - Taints []Taint - - // If specified, the source to get node configuration from - // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field - // +optional - ConfigSource *NodeConfigSource -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil. -type NodeConfigSource struct { - metav1.TypeMeta - ConfigMapRef *ObjectReference -} - -// DaemonEndpoint contains information about a single Daemon endpoint. -type DaemonEndpoint struct { - /* - The port tag was not properly in quotes in earlier releases, so it must be - uppercased for backwards compat (since it was falling back to var name of - 'Port'). - */ - - // Port number of the given endpoint. - Port int32 -} - -// NodeDaemonEndpoints lists ports opened by daemons running on the Node. -type NodeDaemonEndpoints struct { - // Endpoint on which Kubelet is listening. - // +optional - KubeletEndpoint DaemonEndpoint -} - -// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. -type NodeSystemInfo struct { - // MachineID reported by the node. For unique machine identification - // in the cluster this field is preferred. Learn more from man(5) - // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html - MachineID string - // SystemUUID reported by the node. For unique machine identification - // MachineID is preferred. This field is specific to Red Hat hosts - // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html - SystemUUID string - // Boot ID reported by the node. - BootID string - // Kernel Version reported by the node. - KernelVersion string - // OS Image reported by the node. - OSImage string - // ContainerRuntime Version reported by the node. - ContainerRuntimeVersion string - // Kubelet Version reported by the node. - KubeletVersion string - // KubeProxy Version reported by the node. - KubeProxyVersion string - // The Operating System reported by the node - OperatingSystem string - // The Architecture reported by the node - Architecture string -} - -// NodeStatus is information about the current status of a node. 
-type NodeStatus struct { - // Capacity represents the total resources of a node. - // +optional - Capacity ResourceList - // Allocatable represents the resources of a node that are available for scheduling. - // +optional - Allocatable ResourceList - // NodePhase is the current lifecycle phase of the node. - // +optional - Phase NodePhase - // Conditions is an array of current node conditions. - // +optional - Conditions []NodeCondition - // Queried from cloud provider, if available. - // +optional - Addresses []NodeAddress - // Endpoints of daemons running on the Node. - // +optional - DaemonEndpoints NodeDaemonEndpoints - // Set of ids/uuids to uniquely identify the node. - // +optional - NodeInfo NodeSystemInfo - // List of container images on this node - // +optional - Images []ContainerImage - // List of attachable volumes in use (mounted) by the node. - // +optional - VolumesInUse []UniqueVolumeName - // List of volumes that are attached to the node. - // +optional - VolumesAttached []AttachedVolume -} - -type UniqueVolumeName string - -// AttachedVolume describes a volume attached to a node -type AttachedVolume struct { - // Name of the attached volume - Name UniqueVolumeName - - // DevicePath represents the device path where the volume should be available - DevicePath string -} - -// AvoidPods describes pods that should avoid this node. This is the value for a -// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and -// will eventually become a field of NodeStatus. -type AvoidPods struct { - // Bounded-sized list of signatures of pods that should avoid this node, sorted - // in timestamp order from oldest to newest. Size of the slice is unspecified. - // +optional - PreferAvoidPods []PreferAvoidPodsEntry -} - -// Describes a class of pods that should avoid this node. -type PreferAvoidPodsEntry struct { - // The class of pods. - PodSignature PodSignature - // Time at which this entry was added to the list. - // +optional - EvictionTime metav1.Time - // (brief) reason why this entry was added to the list. - // +optional - Reason string - // Human readable message indicating why this entry was added to the list. - // +optional - Message string -} - -// Describes the class of pods that should avoid this node. -// Exactly one field should be set. -type PodSignature struct { - // Reference to controller whose pods should avoid this node. - // +optional - PodController *metav1.OwnerReference -} - -// Describe a container image -type ContainerImage struct { - // Names by which this image is known. - Names []string - // The size of the image in bytes. - // +optional - SizeBytes int64 -} - -type NodePhase string - -// These are the valid phases of node. -const ( - // NodePending means the node has been created/added by the system, but not configured. - NodePending NodePhase = "Pending" - // NodeRunning means the node has been configured and has Kubernetes components running. - NodeRunning NodePhase = "Running" - // NodeTerminated means the node has been removed from the cluster. - NodeTerminated NodePhase = "Terminated" -) - -type NodeConditionType string - -// These are valid conditions of node. Currently, we don't have enough information to decide -// node condition. In the future, we will add more. The proposed set of conditions are: -// NodeReady, NodeReachable -const ( - // NodeReady means kubelet is healthy and ready to accept pods. 
- NodeReady NodeConditionType = "Ready" - // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk - // space on the node. - NodeOutOfDisk NodeConditionType = "OutOfDisk" - // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. - NodeMemoryPressure NodeConditionType = "MemoryPressure" - // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. - NodeDiskPressure NodeConditionType = "DiskPressure" - // NodeNetworkUnavailable means that network for the node is not correctly configured. - NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" - // NodeConfigOK indicates whether the kubelet is correctly configured - NodeConfigOK NodeConditionType = "ConfigOK" -) - -type NodeCondition struct { - Type NodeConditionType - Status ConditionStatus - // +optional - LastHeartbeatTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -type NodeAddressType string - -const ( - NodeHostName NodeAddressType = "Hostname" - NodeExternalIP NodeAddressType = "ExternalIP" - NodeInternalIP NodeAddressType = "InternalIP" - NodeExternalDNS NodeAddressType = "ExternalDNS" - NodeInternalDNS NodeAddressType = "InternalDNS" -) - -type NodeAddress struct { - Type NodeAddressType - Address string -} - -// NodeResources is an object for conveying resource information about a node. -// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. -type NodeResources struct { - // Capacity represents the available resources of a node - // +optional - Capacity ResourceList -} - -// ResourceName is the name identifying various resources in a ResourceList. -type ResourceName string - -// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, -// with the -, _, and . characters allowed anywhere, except the first or last character. -// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than -// camel case, separating compound words. -// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. -const ( - // CPU, in cores. (500m = .5 cores) - ResourceCPU ResourceName = "cpu" - // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceMemory ResourceName = "memory" - // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) - ResourceStorage ResourceName = "storage" - // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - // The resource name for ResourceEphemeralStorage is alpha and it can change across releases. - ResourceEphemeralStorage ResourceName = "ephemeral-storage" - // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. - ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" -) - -const ( - // Namespace prefix for opaque counted resources (alpha). - ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" - // Default namespace prefix. - ResourceDefaultNamespacePrefix = "kubernetes.io/" - // Name prefix for huge page resources (alpha). - ResourceHugePagesPrefix = "hugepages-" -) - -// ResourceList is a set of (resource name, quantity) pairs. 
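// Illustrative sketch, not part of the vendored file: the resource names above
// are paired with resource.Quantity values, so "500m" CPU is half a core and
// "128Mi" memory is 128 * 1024 * 1024 bytes. A ResourceList (the map type
// defined immediately below) holding such quantities could be built like this;
// the concrete values are assumptions.
func exampleResourceList() ResourceList {
	return ResourceList{
		ResourceCPU:    resource.MustParse("500m"),  // 0.5 cores
		ResourceMemory: resource.MustParse("128Mi"), // 128 * 1024 * 1024 bytes
	}
}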
-type ResourceList map[ResourceName]resource.Quantity - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Node is a worker node in Kubernetes -// The name of the node according to etcd is in ObjectMeta.Name. -type Node struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a node. - // +optional - Spec NodeSpec - - // Status describes the current status of a Node - // +optional - Status NodeStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeList is a list of nodes. -type NodeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Node -} - -// NamespaceSpec describes the attributes on a Namespace -type NamespaceSpec struct { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage - Finalizers []FinalizerName -} - -// FinalizerName is the name identifying a finalizer during namespace lifecycle. -type FinalizerName string - -// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or -// in metav1. -const ( - FinalizerKubernetes FinalizerName = "kubernetes" -) - -// NamespaceStatus is information about the current status of a Namespace. -type NamespaceStatus struct { - // Phase is the current lifecycle phase of the namespace. - // +optional - Phase NamespacePhase -} - -type NamespacePhase string - -// These are the valid phases of a namespace. -const ( - // NamespaceActive means the namespace is available for use in the system - NamespaceActive NamespacePhase = "Active" - // NamespaceTerminating means the namespace is undergoing graceful termination - NamespaceTerminating NamespacePhase = "Terminating" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A namespace provides a scope for Names. -// Use of multiple namespaces is optional -type Namespace struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of the Namespace. - // +optional - Spec NamespaceSpec - - // Status describes the current status of a Namespace - // +optional - Status NamespaceStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NamespaceList is a list of Namespaces. -type NamespaceList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Namespace -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. -// Deprecated in 1.7, please use the bindings subresource of pods instead. -type Binding struct { - metav1.TypeMeta - // ObjectMeta describes the object that is being bound. - // +optional - metav1.ObjectMeta - - // Target is the object to bind to. - Target ObjectReference -} - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -type Preconditions struct { - // Specifies the target UID. - // +optional - UID *types.UID -} - -// DeletionPropagation decides whether and how garbage collection will be performed. -type DeletionPropagation string - -const ( - // Orphans the dependents. - DeletePropagationOrphan DeletionPropagation = "Orphan" - // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. 
- DeletePropagationBackground DeletionPropagation = "Background" - // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. - // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. - // This policy is cascading, i.e., the dependents will be deleted with Foreground. - DeletePropagationForeground DeletionPropagation = "Foreground" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DeleteOptions may be provided when deleting an API object -// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. -type DeleteOptions struct { - metav1.TypeMeta - - // Optional duration in seconds before the object should be deleted. Value must be non-negative integer. - // The value zero indicates delete immediately. If this value is nil, the default grace period for the - // specified type will be used. - // +optional - GracePeriodSeconds *int64 - - // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be - // returned. - // +optional - Preconditions *Preconditions - - // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. - // Should the dependent objects be orphaned. If true/false, the "orphan" - // finalizer will be added to/removed from the object's finalizers list. - // Either this field or PropagationPolicy may be set, but not both. - // +optional - OrphanDependents *bool - - // Whether and how garbage collection will be performed. - // Either this field or OrphanDependents may be set, but not both. - // The default policy is decided by the existing finalizer set in the - // metadata.finalizers and the resource-specific default policy. - // +optional - PropagationPolicy *DeletionPropagation -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ListOptions is the query options to a standard REST list call, and has future support for -// watch calls. -// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. -type ListOptions struct { - metav1.TypeMeta - - // A selector based on labels - LabelSelector labels.Selector - // A selector based on fields - FieldSelector fields.Selector - - // If true, partially initialized resources are included in the response. - IncludeUninitialized bool - - // If true, watch for changes to this list - Watch bool - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - // When specified for list: - // - if unset, then the result is returned from remote storage based on quorum-read flag; - // - if it's 0, then we simply return what we currently have in cache, no guarantee; - // - if set to non zero, then the result is at least as fresh as given rv. - ResourceVersion string - // Timeout for the list/watch call. - TimeoutSeconds *int64 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodLogOptions is the query options for a Pod's logs REST call -type PodLogOptions struct { - metav1.TypeMeta - - // Container for which to return logs - Container string - // If true, follow the logs for the pod - Follow bool - // If true, return previous terminated container logs - Previous bool - // A relative time in seconds before the current time from which to show logs. 
If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceSeconds *int64 - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceTime *metav1.Time - // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. - Timestamps bool - // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - TailLines *int64 - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - LimitBytes *int64 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodAttachOptions is the query options to a Pod's remote attach call -// TODO: merge w/ PodExecOptions below for stdin, stdout, etc -type PodAttachOptions struct { - metav1.TypeMeta - - // Stdin if true indicates that stdin is to be redirected for the attach call - // +optional - Stdin bool - - // Stdout if true indicates that stdout is to be redirected for the attach call - // +optional - Stdout bool - - // Stderr if true indicates that stderr is to be redirected for the attach call - // +optional - Stderr bool - - // TTY if true indicates that a tty will be allocated for the attach call - // +optional - TTY bool - - // Container to attach to. - // +optional - Container string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodExecOptions is the query options to a Pod's remote exec call -type PodExecOptions struct { - metav1.TypeMeta - - // Stdin if true indicates that stdin is to be redirected for the exec call - Stdin bool - - // Stdout if true indicates that stdout is to be redirected for the exec call - Stdout bool - - // Stderr if true indicates that stderr is to be redirected for the exec call - Stderr bool - - // TTY if true indicates that a tty will be allocated for the exec call - TTY bool - - // Container in which to execute the command. - Container string - - // Command is the remote command to execute; argv array; not executed within a shell. 
- Command []string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPortForwardOptions is the query options to a Pod's port forward call -type PodPortForwardOptions struct { - metav1.TypeMeta - - // The list of ports to forward - // +optional - Ports []int32 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodProxyOptions is the query options to a Pod's proxy call -type PodProxyOptions struct { - metav1.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeProxyOptions is the query options to a Node's proxy call -type NodeProxyOptions struct { - metav1.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceProxyOptions is the query options to a Service's proxy call. -type ServiceProxyOptions struct { - metav1.TypeMeta - - // Path is the part of URLs that include service endpoints, suffixes, - // and parameters to use for the current proxy request to service. - // For example, the whole request URL is - // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. - // Path is _search?q=user:kimchy. - Path string -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type ObjectReference struct { - // +optional - Kind string - // +optional - Namespace string - // +optional - Name string - // +optional - UID types.UID - // +optional - APIVersion string - // +optional - ResourceVersion string - - // Optional. If referring to a piece of an object instead of an entire object, this string - // should contain information to identify the sub-object. For example, if the object - // reference is to a container within a pod, this would take on a value like: - // "spec.containers{name}" (where "name" refers to the name of the container that triggered - // the event) or if no container name is specified "spec.containers[2]" (container with - // index 2 in this pod). This syntax is chosen only to have some well-defined way of - // referencing a part of an object. - // TODO: this design is not final and this field is subject to change in the future. - // +optional - FieldPath string -} - -// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. -type LocalObjectReference struct { - //TODO: Add other useful fields. apiVersion, kind, uid? - Name string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type SerializedReference struct { - metav1.TypeMeta - // +optional - Reference ObjectReference -} - -type EventSource struct { - // Component from which the event is generated. - // +optional - Component string - // Node name on which the event is generated. - // +optional - Host string -} - -// Valid values for event types (new types could be added in future) -const ( - // Information only and will not cause any problems - EventTypeNormal string = "Normal" - // These events are to warn that something might go wrong - EventTypeWarning string = "Warning" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Event is a report of an event somewhere in the cluster. 
-// TODO: Decide whether to store these separately or with the object they apply to. -type Event struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Required. The object that this event is about. - // +optional - InvolvedObject ObjectReference - - // Optional; this should be a short, machine understandable string that gives the reason - // for this event being generated. For example, if the event is reporting that a container - // can't start, the Reason might be "ImageNotFound". - // TODO: provide exact specification for format. - // +optional - Reason string - - // Optional. A human-readable description of the status of this operation. - // TODO: decide on maximum length. - // +optional - Message string - - // Optional. The component reporting this event. Should be a short machine understandable string. - // +optional - Source EventSource - - // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - // +optional - FirstTimestamp metav1.Time - - // The time at which the most recent occurrence of this event was recorded. - // +optional - LastTimestamp metav1.Time - - // The number of times this event has occurred. - // +optional - Count int32 - - // Type of this event (Normal, Warning), new types could be added in the future. - // +optional - Type string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// EventList is a list of events. -type EventList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Event -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// List holds a list of objects, which may not be known by the server. -type List metainternalversion.List - -// A type of object that is limited -type LimitType string - -const ( - // Limit that applies to all pods in a namespace - LimitTypePod LimitType = "Pod" - // Limit that applies to all containers in a namespace - LimitTypeContainer LimitType = "Container" - // Limit that applies to all persistent volume claims in a namespace - LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" -) - -// LimitRangeItem defines a min/max usage limit for any resource that matches on kind -type LimitRangeItem struct { - // Type of resource that this limit applies to - // +optional - Type LimitType - // Max usage constraints on this kind by resource name - // +optional - Max ResourceList - // Min usage constraints on this kind by resource name - // +optional - Min ResourceList - // Default resource requirement limit value by resource name. - // +optional - Default ResourceList - // DefaultRequest resource requirement request value by resource name. - // +optional - DefaultRequest ResourceList - // MaxLimitRequestRatio represents the max burst value for the named resource - // +optional - MaxLimitRequestRatio ResourceList -} - -// LimitRangeSpec defines a min/max usage limit for resources that match on kind -type LimitRangeSpec struct { - // Limits is the list of LimitRangeItem objects that are enforced - Limits []LimitRangeItem -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LimitRange sets resource usage limits for each kind of resource in a Namespace -type LimitRange struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the limits enforced - // +optional - Spec LimitRangeSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LimitRangeList is a list of LimitRange items. 
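// Illustrative sketch, not part of the vendored file: a LimitRangeSpec built
// from the LimitRangeItem type above, defaulting container CPU and capping
// memory (the LimitRangeList collection type follows below). The quantities
// are assumptions.
func exampleLimitRangeSpec() LimitRangeSpec {
	return LimitRangeSpec{
		Limits: []LimitRangeItem{{
			Type:           LimitTypeContainer,
			DefaultRequest: ResourceList{ResourceCPU: resource.MustParse("100m")},
			Default:        ResourceList{ResourceCPU: resource.MustParse("200m")},
			Max:            ResourceList{ResourceMemory: resource.MustParse("1Gi")},
		}},
	}
}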
-type LimitRangeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is a list of LimitRange objects - Items []LimitRange -} - -// The following identify resource constants for Kubernetes object types -const ( - // Pods, number - ResourcePods ResourceName = "pods" - // Services, number - ResourceServices ResourceName = "services" - // ReplicationControllers, number - ResourceReplicationControllers ResourceName = "replicationcontrollers" - // ResourceQuotas, number - ResourceQuotas ResourceName = "resourcequotas" - // ResourceSecrets, number - ResourceSecrets ResourceName = "secrets" - // ResourceConfigMaps, number - ResourceConfigMaps ResourceName = "configmaps" - // ResourcePersistentVolumeClaims, number - ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" - // ResourceServicesNodePorts, number - ResourceServicesNodePorts ResourceName = "services.nodeports" - // ResourceServicesLoadBalancers, number - ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" - // CPU request, in cores. (500m = .5 cores) - ResourceRequestsCPU ResourceName = "requests.cpu" - // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsMemory ResourceName = "requests.memory" - // Storage request, in bytes - ResourceRequestsStorage ResourceName = "requests.storage" - // Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage" - // CPU limit, in cores. (500m = .5 cores) - ResourceLimitsCPU ResourceName = "limits.cpu" - // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsMemory ResourceName = "limits.memory" - // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" -) - -// A ResourceQuotaScope defines a filter that must match each object tracked by a quota -type ResourceQuotaScope string - -const ( - // Match all pod objects where spec.activeDeadlineSeconds - ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" - // Match all pod objects where !spec.activeDeadlineSeconds - ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" - // Match all pod objects that have best effort quality of service - ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" - // Match all pod objects that do not have best effort quality of service - ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" -) - -// ResourceQuotaSpec defines the desired hard limits to enforce for Quota -type ResourceQuotaSpec struct { - // Hard is the set of desired hard limits for each named resource - // +optional - Hard ResourceList - // A collection of filters that must match each object tracked by a quota. - // If not specified, the quota matches all objects. 
- // +optional - Scopes []ResourceQuotaScope -} - -// ResourceQuotaStatus defines the enforced hard limits and observed use -type ResourceQuotaStatus struct { - // Hard is the set of enforced hard limits for each named resource - // +optional - Hard ResourceList - // Used is the current observed total usage of the resource in the namespace - // +optional - Used ResourceList -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ResourceQuota sets aggregate quota restrictions enforced per namespace -type ResourceQuota struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired quota - // +optional - Spec ResourceQuotaSpec - - // Status defines the actual enforced quota and its current usage - // +optional - Status ResourceQuotaStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ResourceQuotaList is a list of ResourceQuota items -type ResourceQuotaList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is a list of ResourceQuota objects - Items []ResourceQuota -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Secret holds secret data of a certain type. The total bytes of the values in -// the Data field must be less than MaxSecretSize bytes. -type Secret struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Data contains the secret data. Each key must consist of alphanumeric - // characters, '-', '_' or '.'. The serialized form of the secret data is a - // base64 encoded string, representing the arbitrary (possibly non-string) - // data value here. - // +optional - Data map[string][]byte - - // Used to facilitate programmatic handling of secret data. - // +optional - Type SecretType -} - -const MaxSecretSize = 1 * 1024 * 1024 - -type SecretType string - -const ( - // SecretTypeOpaque is the default; arbitrary user-defined data - SecretTypeOpaque SecretType = "Opaque" - - // SecretTypeServiceAccountToken contains a token that identifies a service account to the API - // - // Required fields: - // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies - // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies - // - Secret.Data["token"] - a token that identifies the service account to the API - SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" - - // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountNameKey = "kubernetes.io/service-account.name" - // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountUIDKey = "kubernetes.io/service-account.uid" - // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets - ServiceAccountTokenKey = "token" - // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets - ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" - // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets - ServiceAccountRootCAKey = "ca.crt" - // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls - ServiceAccountNamespaceKey = "namespace" - - // SecretTypeDockercfg contains a dockercfg file that 
follows the same format rules as ~/.dockercfg - // - // Required fields: - // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file - SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" - - // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets - DockerConfigKey = ".dockercfg" - - // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json - // - // Required fields: - // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file - SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" - - // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets - DockerConfigJsonKey = ".dockerconfigjson" - - // SecretTypeBasicAuth contains data needed for basic authentication. - // - // Required at least one of fields: - // - Secret.Data["username"] - username used for authentication - // - Secret.Data["password"] - password or token needed for authentication - SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" - - // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets - BasicAuthUsernameKey = "username" - // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets - BasicAuthPasswordKey = "password" - - // SecretTypeSSHAuth contains data needed for SSH authetication. - // - // Required field: - // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication - SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" - - // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets - SSHAuthPrivateKey = "ssh-privatekey" - - // SecretTypeTLS contains information about a TLS client or server secret. It - // is primarily used with TLS termination of the Ingress resource, but may be - // used in other types. - // - // Required fields: - // - Secret.Data["tls.key"] - TLS private key. - // Secret.Data["tls.crt"] - TLS certificate. - // TODO: Consider supporting different formats, specifying CA/destinationCA. - SecretTypeTLS SecretType = "kubernetes.io/tls" - - // TLSCertKey is the key for tls certificates in a TLS secret. - TLSCertKey = "tls.crt" - // TLSPrivateKeyKey is the key for the private key field in a TLS secret. - TLSPrivateKeyKey = "tls.key" - // SecretTypeBootstrapToken is used during the automated bootstrap process (first - // implemented by kubeadm). It stores tokens that are used to sign well known - // ConfigMaps. They are used for authn. - SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type SecretList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Secret -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConfigMap holds configuration data for components or applications to consume. -type ConfigMap struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Data contains the configuration data. - // Each key must consist of alphanumeric characters, '-', '_' or '.'. - // +optional - Data map[string]string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConfigMapList is a resource containing a list of ConfigMap objects. -type ConfigMapList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is the list of ConfigMaps. 
- Items []ConfigMap -} - -// These constants are for remote command execution and port forwarding and are -// used by both the client side and server side components. -// -// This is probably not the ideal place for them, but it didn't seem worth it -// to create pkg/exec and pkg/portforward just to contain a single file with -// constants in it. Suggestions for more appropriate alternatives are -// definitely welcome! -const ( - // Enable stdin for remote command execution - ExecStdinParam = "input" - // Enable stdout for remote command execution - ExecStdoutParam = "output" - // Enable stderr for remote command execution - ExecStderrParam = "error" - // Enable TTY for remote command execution - ExecTTYParam = "tty" - // Command to run for remote command execution - ExecCommandParam = "command" - - // Name of header that specifies stream type - StreamType = "streamType" - // Value for streamType header for stdin stream - StreamTypeStdin = "stdin" - // Value for streamType header for stdout stream - StreamTypeStdout = "stdout" - // Value for streamType header for stderr stream - StreamTypeStderr = "stderr" - // Value for streamType header for data stream - StreamTypeData = "data" - // Value for streamType header for error stream - StreamTypeError = "error" - // Value for streamType header for terminal resize stream - StreamTypeResize = "resize" - - // Name of header that specifies the port being forwarded - PortHeader = "port" - // Name of header that specifies a request ID used to associate the error - // and data streams for a single forwarded connection - PortForwardRequestIDHeader = "requestID" -) - -// Type and constants for component health validation. -type ComponentConditionType string - -// These are the valid conditions for the component. -const ( - ComponentHealthy ComponentConditionType = "Healthy" -) - -type ComponentCondition struct { - Type ComponentConditionType - Status ConditionStatus - // +optional - Message string - // +optional - Error string -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ComponentStatus (and ComponentStatusList) holds the cluster validation info. -type ComponentStatus struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // +optional - Conditions []ComponentCondition -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type ComponentStatusList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ComponentStatus -} - -// SecurityContext holds security configuration that will be applied to a container. -// Some fields are present in both SecurityContext and PodSecurityContext. When both -// are set, the values in SecurityContext take precedence. -type SecurityContext struct { - // The capabilities to add/drop when running containers. - // Defaults to the default set of capabilities granted by the container runtime. - // +optional - Capabilities *Capabilities - // Run container in privileged mode. - // Processes in privileged containers are essentially equivalent to root on the host. - // Defaults to false. - // +optional - Privileged *bool - // The SELinux context to be applied to the container. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. 
- // +optional - SELinuxOptions *SELinuxOptions - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsUser *int64 - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsNonRoot *bool - // The read-only root filesystem allows you to restrict the locations that an application can write - // files to, ensuring the persistent data can only be written to mounts. - // +optional - ReadOnlyRootFilesystem *bool - // AllowPrivilegeEscalation controls whether a process can gain more - // privileges than its parent process. This bool directly controls if - // the no_new_privs flag will be set on the container process. - // +optional - AllowPrivilegeEscalation *bool -} - -// SELinuxOptions are the labels to be applied to the container. -type SELinuxOptions struct { - // SELinux user label - // +optional - User string - // SELinux role label - // +optional - Role string - // SELinux type label - // +optional - Type string - // SELinux level label. - // +optional - Level string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record -// the global allocation state of the cluster. The schema of Range and Data generic, in that Range -// should be a string representation of the inputs to a range (for instance, for IP allocation it -// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a -// binary range. Consumers should use annotations to record additional information (schema version, -// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation -// of the cluster, thus the object is less strongly typed than most. -type RangeAllocation struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or - // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define - // a start and end unless there is an implicit end. - Range string - // A byte array representing the serialized state of a range allocation. Additional clarifiers on - // the type or format of data should be represented with annotations. For IP allocations, this is - // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing - // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). - Data []byte -} - -const ( - // "default-scheduler" is the name of default scheduler. - DefaultSchedulerName = "default-scheduler" - - // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule - // corresponding to every RequiredDuringScheduling affinity rule. 
- // When the --hard-pod-affinity-weight scheduler flag is not specified, - // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. - DefaultHardPodAffinitySymmetricWeight int = 1 -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go b/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go new file mode 100644 index 000000000..67fc6e5ed --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go @@ -0,0 +1,296 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "fmt" + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// FindPort locates the container port for the given pod and portName. If the +// targetPort is a number, use that. If the targetPort is a string, look that +// string up in all named ports in all containers in the target pod. If no +// match is found, fail. +func FindPort(pod *v1.Pod, svcPort *v1.ServicePort) (int, error) { + portName := svcPort.TargetPort + switch portName.Type { + case intstr.String: + name := portName.StrVal + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + if port.Name == name && port.Protocol == svcPort.Protocol { + return int(port.ContainerPort), nil + } + } + } + case intstr.Int: + return portName.IntValue(), nil + } + + return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID) +} + +// Visitor is called with each object name, and returns true if visiting should continue +type Visitor func(name string) (shouldContinue bool) + +// VisitPodSecretNames invokes the visitor function with the name of every secret +// referenced by the pod spec. If visitor returns false, visiting is short-circuited. +// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. +// Returns true if visiting completed, false if visiting was short-circuited. 
+func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool { + for _, reference := range pod.Spec.ImagePullSecrets { + if !visitor(reference.Name) { + return false + } + } + for i := range pod.Spec.InitContainers { + if !visitContainerSecretNames(&pod.Spec.InitContainers[i], visitor) { + return false + } + } + for i := range pod.Spec.Containers { + if !visitContainerSecretNames(&pod.Spec.Containers[i], visitor) { + return false + } + } + var source *v1.VolumeSource + + for i := range pod.Spec.Volumes { + source = &pod.Spec.Volumes[i].VolumeSource + switch { + case source.AzureFile != nil: + if len(source.AzureFile.SecretName) > 0 && !visitor(source.AzureFile.SecretName) { + return false + } + case source.CephFS != nil: + if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) { + return false + } + case source.FlexVolume != nil: + if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) { + return false + } + case source.Projected != nil: + for j := range source.Projected.Sources { + if source.Projected.Sources[j].Secret != nil { + if !visitor(source.Projected.Sources[j].Secret.Name) { + return false + } + } + } + case source.RBD != nil: + if source.RBD.SecretRef != nil && !visitor(source.RBD.SecretRef.Name) { + return false + } + case source.Secret != nil: + if !visitor(source.Secret.SecretName) { + return false + } + case source.ScaleIO != nil: + if source.ScaleIO.SecretRef != nil && !visitor(source.ScaleIO.SecretRef.Name) { + return false + } + case source.ISCSI != nil: + if source.ISCSI.SecretRef != nil && !visitor(source.ISCSI.SecretRef.Name) { + return false + } + case source.StorageOS != nil: + if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Name) { + return false + } + } + } + return true +} + +func visitContainerSecretNames(container *v1.Container, visitor Visitor) bool { + for _, env := range container.EnvFrom { + if env.SecretRef != nil { + if !visitor(env.SecretRef.Name) { + return false + } + } + } + for _, envVar := range container.Env { + if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil { + if !visitor(envVar.ValueFrom.SecretKeyRef.Name) { + return false + } + } + } + return true +} + +// VisitPodConfigmapNames invokes the visitor function with the name of every configmap +// referenced by the pod spec. If visitor returns false, visiting is short-circuited. +// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. +// Returns true if visiting completed, false if visiting was short-circuited. 
+func VisitPodConfigmapNames(pod *v1.Pod, visitor Visitor) bool { + for i := range pod.Spec.InitContainers { + if !visitContainerConfigmapNames(&pod.Spec.InitContainers[i], visitor) { + return false + } + } + for i := range pod.Spec.Containers { + if !visitContainerConfigmapNames(&pod.Spec.Containers[i], visitor) { + return false + } + } + var source *v1.VolumeSource + for i := range pod.Spec.Volumes { + source = &pod.Spec.Volumes[i].VolumeSource + switch { + case source.Projected != nil: + for j := range source.Projected.Sources { + if source.Projected.Sources[j].ConfigMap != nil { + if !visitor(source.Projected.Sources[j].ConfigMap.Name) { + return false + } + } + } + case source.ConfigMap != nil: + if !visitor(source.ConfigMap.Name) { + return false + } + } + } + return true +} + +func visitContainerConfigmapNames(container *v1.Container, visitor Visitor) bool { + for _, env := range container.EnvFrom { + if env.ConfigMapRef != nil { + if !visitor(env.ConfigMapRef.Name) { + return false + } + } + } + for _, envVar := range container.Env { + if envVar.ValueFrom != nil && envVar.ValueFrom.ConfigMapKeyRef != nil { + if !visitor(envVar.ValueFrom.ConfigMapKeyRef.Name) { + return false + } + } + } + return true +} + +// GetContainerStatus extracts the status of container "name" from "statuses". +// It also returns if "name" exists. +func GetContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i], true + } + } + return v1.ContainerStatus{}, false +} + +// GetExistingContainerStatus extracts the status of container "name" from "statuses", +// and returns empty status if "name" does not exist. +func GetExistingContainerStatus(statuses []v1.ContainerStatus, name string) v1.ContainerStatus { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i] + } + } + return v1.ContainerStatus{} +} + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *v1.Pod) bool { + return IsPodReadyConditionTrue(pod.Status) +} + +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. +func IsPodReadyConditionTrue(status v1.PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == v1.ConditionTrue +} + +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { + _, condition := GetPodCondition(&status, v1.PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns -1 and nil if the condition is not present; otherwise returns the index of the located condition and the condition itself.
+func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// UpdatePodCondition updates an existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// status has changed. +// Returns true if pod condition has changed or has been added. +func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool { + condition.LastTransitionTime = metav1.Now() + // Try to find this pod condition. + conditionIndex, oldCondition := GetPodCondition(status, condition.Type) + + if oldCondition == nil { + // We are adding a new pod condition. + status.Conditions = append(status.Conditions, *condition) + return true + } else { + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields has changed. + return !isEqual + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go deleted file mode 100644 index 938b7316a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go +++ /dev/null @@ -1,6320 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package api - -import ( - resource "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented.
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AWSElasticBlockStoreVolumeSource).DeepCopyInto(out.(*AWSElasticBlockStoreVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Affinity).DeepCopyInto(out.(*Affinity)) - return nil - }, InType: reflect.TypeOf(&Affinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AttachedVolume).DeepCopyInto(out.(*AttachedVolume)) - return nil - }, InType: reflect.TypeOf(&AttachedVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AvoidPods).DeepCopyInto(out.(*AvoidPods)) - return nil - }, InType: reflect.TypeOf(&AvoidPods{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureDiskVolumeSource).DeepCopyInto(out.(*AzureDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFilePersistentVolumeSource).DeepCopyInto(out.(*AzureFilePersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFilePersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFileVolumeSource).DeepCopyInto(out.(*AzureFileVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Binding).DeepCopyInto(out.(*Binding)) - return nil - }, InType: reflect.TypeOf(&Binding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Capabilities).DeepCopyInto(out.(*Capabilities)) - return nil - }, InType: reflect.TypeOf(&Capabilities{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSPersistentVolumeSource).DeepCopyInto(out.(*CephFSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSVolumeSource).DeepCopyInto(out.(*CephFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CinderVolumeSource).DeepCopyInto(out.(*CinderVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CinderVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClientIPConfig).DeepCopyInto(out.(*ClientIPConfig)) - return nil - }, InType: reflect.TypeOf(&ClientIPConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentCondition).DeepCopyInto(out.(*ComponentCondition)) - return nil - }, InType: reflect.TypeOf(&ComponentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*ComponentStatus).DeepCopyInto(out.(*ComponentStatus)) - return nil - }, InType: reflect.TypeOf(&ComponentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentStatusList).DeepCopyInto(out.(*ComponentStatusList)) - return nil - }, InType: reflect.TypeOf(&ComponentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMap).DeepCopyInto(out.(*ConfigMap)) - return nil - }, InType: reflect.TypeOf(&ConfigMap{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapEnvSource).DeepCopyInto(out.(*ConfigMapEnvSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapKeySelector).DeepCopyInto(out.(*ConfigMapKeySelector)) - return nil - }, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapList).DeepCopyInto(out.(*ConfigMapList)) - return nil - }, InType: reflect.TypeOf(&ConfigMapList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapProjection).DeepCopyInto(out.(*ConfigMapProjection)) - return nil - }, InType: reflect.TypeOf(&ConfigMapProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapVolumeSource).DeepCopyInto(out.(*ConfigMapVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Container).DeepCopyInto(out.(*Container)) - return nil - }, InType: reflect.TypeOf(&Container{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerImage).DeepCopyInto(out.(*ContainerImage)) - return nil - }, InType: reflect.TypeOf(&ContainerImage{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerPort).DeepCopyInto(out.(*ContainerPort)) - return nil - }, InType: reflect.TypeOf(&ContainerPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerState).DeepCopyInto(out.(*ContainerState)) - return nil - }, InType: reflect.TypeOf(&ContainerState{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateRunning).DeepCopyInto(out.(*ContainerStateRunning)) - return nil - }, InType: reflect.TypeOf(&ContainerStateRunning{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateTerminated).DeepCopyInto(out.(*ContainerStateTerminated)) - return nil - }, InType: reflect.TypeOf(&ContainerStateTerminated{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateWaiting).DeepCopyInto(out.(*ContainerStateWaiting)) - return nil - }, InType: reflect.TypeOf(&ContainerStateWaiting{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*ContainerStatus).DeepCopyInto(out.(*ContainerStatus)) - return nil - }, InType: reflect.TypeOf(&ContainerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonEndpoint).DeepCopyInto(out.(*DaemonEndpoint)) - return nil - }, InType: reflect.TypeOf(&DaemonEndpoint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeleteOptions).DeepCopyInto(out.(*DeleteOptions)) - return nil - }, InType: reflect.TypeOf(&DeleteOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIProjection).DeepCopyInto(out.(*DownwardAPIProjection)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeFile).DeepCopyInto(out.(*DownwardAPIVolumeFile)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeSource).DeepCopyInto(out.(*DownwardAPIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EmptyDirVolumeSource).DeepCopyInto(out.(*EmptyDirVolumeSource)) - return nil - }, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointAddress).DeepCopyInto(out.(*EndpointAddress)) - return nil - }, InType: reflect.TypeOf(&EndpointAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointPort).DeepCopyInto(out.(*EndpointPort)) - return nil - }, InType: reflect.TypeOf(&EndpointPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointSubset).DeepCopyInto(out.(*EndpointSubset)) - return nil - }, InType: reflect.TypeOf(&EndpointSubset{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Endpoints).DeepCopyInto(out.(*Endpoints)) - return nil - }, InType: reflect.TypeOf(&Endpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointsList).DeepCopyInto(out.(*EndpointsList)) - return nil - }, InType: reflect.TypeOf(&EndpointsList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvFromSource).DeepCopyInto(out.(*EnvFromSource)) - return nil - }, InType: reflect.TypeOf(&EnvFromSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVar).DeepCopyInto(out.(*EnvVar)) - return nil - }, InType: reflect.TypeOf(&EnvVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVarSource).DeepCopyInto(out.(*EnvVarSource)) - return nil - }, InType: reflect.TypeOf(&EnvVarSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: 
reflect.TypeOf(&Event{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventList).DeepCopyInto(out.(*EventList)) - return nil - }, InType: reflect.TypeOf(&EventList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventSource).DeepCopyInto(out.(*EventSource)) - return nil - }, InType: reflect.TypeOf(&EventSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExecAction).DeepCopyInto(out.(*ExecAction)) - return nil - }, InType: reflect.TypeOf(&ExecAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FCVolumeSource).DeepCopyInto(out.(*FCVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FCVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlexVolumeSource).DeepCopyInto(out.(*FlexVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlexVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlockerVolumeSource).DeepCopyInto(out.(*FlockerVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlockerVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GCEPersistentDiskVolumeSource).DeepCopyInto(out.(*GCEPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GitRepoVolumeSource).DeepCopyInto(out.(*GitRepoVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GlusterfsVolumeSource).DeepCopyInto(out.(*GlusterfsVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPGetAction).DeepCopyInto(out.(*HTTPGetAction)) - return nil - }, InType: reflect.TypeOf(&HTTPGetAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPHeader).DeepCopyInto(out.(*HTTPHeader)) - return nil - }, InType: reflect.TypeOf(&HTTPHeader{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Handler).DeepCopyInto(out.(*Handler)) - return nil - }, InType: reflect.TypeOf(&Handler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostAlias).DeepCopyInto(out.(*HostAlias)) - return nil - }, InType: reflect.TypeOf(&HostAlias{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPathVolumeSource).DeepCopyInto(out.(*HostPathVolumeSource)) - return nil - }, InType: reflect.TypeOf(&HostPathVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ISCSIVolumeSource).DeepCopyInto(out.(*ISCSIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { - in.(*KeyToPath).DeepCopyInto(out.(*KeyToPath)) - return nil - }, InType: reflect.TypeOf(&KeyToPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Lifecycle).DeepCopyInto(out.(*Lifecycle)) - return nil - }, InType: reflect.TypeOf(&Lifecycle{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRange).DeepCopyInto(out.(*LimitRange)) - return nil - }, InType: reflect.TypeOf(&LimitRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeItem).DeepCopyInto(out.(*LimitRangeItem)) - return nil - }, InType: reflect.TypeOf(&LimitRangeItem{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeList).DeepCopyInto(out.(*LimitRangeList)) - return nil - }, InType: reflect.TypeOf(&LimitRangeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeSpec).DeepCopyInto(out.(*LimitRangeSpec)) - return nil - }, InType: reflect.TypeOf(&LimitRangeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerIngress).DeepCopyInto(out.(*LoadBalancerIngress)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerIngress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerStatus).DeepCopyInto(out.(*LoadBalancerStatus)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalObjectReference).DeepCopyInto(out.(*LocalObjectReference)) - return nil - }, InType: reflect.TypeOf(&LocalObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalVolumeSource).DeepCopyInto(out.(*LocalVolumeSource)) - return nil - }, InType: reflect.TypeOf(&LocalVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NFSVolumeSource).DeepCopyInto(out.(*NFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&NFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Namespace).DeepCopyInto(out.(*Namespace)) - return nil - }, InType: reflect.TypeOf(&Namespace{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceList).DeepCopyInto(out.(*NamespaceList)) - return nil - }, InType: reflect.TypeOf(&NamespaceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceSpec).DeepCopyInto(out.(*NamespaceSpec)) - return nil - }, InType: reflect.TypeOf(&NamespaceSpec{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceStatus).DeepCopyInto(out.(*NamespaceStatus)) - return nil - }, InType: reflect.TypeOf(&NamespaceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Node).DeepCopyInto(out.(*Node)) - return nil - }, InType: reflect.TypeOf(&Node{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAddress).DeepCopyInto(out.(*NodeAddress)) - return nil - }, InType: reflect.TypeOf(&NodeAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAffinity).DeepCopyInto(out.(*NodeAffinity)) - return nil - }, InType: reflect.TypeOf(&NodeAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeCondition).DeepCopyInto(out.(*NodeCondition)) - return nil - }, InType: reflect.TypeOf(&NodeCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeConfigSource).DeepCopyInto(out.(*NodeConfigSource)) - return nil - }, InType: reflect.TypeOf(&NodeConfigSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeDaemonEndpoints).DeepCopyInto(out.(*NodeDaemonEndpoints)) - return nil - }, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeList).DeepCopyInto(out.(*NodeList)) - return nil - }, InType: reflect.TypeOf(&NodeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeProxyOptions).DeepCopyInto(out.(*NodeProxyOptions)) - return nil - }, InType: reflect.TypeOf(&NodeProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeResources).DeepCopyInto(out.(*NodeResources)) - return nil - }, InType: reflect.TypeOf(&NodeResources{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelector).DeepCopyInto(out.(*NodeSelector)) - return nil - }, InType: reflect.TypeOf(&NodeSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorRequirement).DeepCopyInto(out.(*NodeSelectorRequirement)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorTerm).DeepCopyInto(out.(*NodeSelectorTerm)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSpec).DeepCopyInto(out.(*NodeSpec)) - return nil - }, InType: reflect.TypeOf(&NodeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeStatus).DeepCopyInto(out.(*NodeStatus)) - return nil - }, InType: reflect.TypeOf(&NodeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSystemInfo).DeepCopyInto(out.(*NodeSystemInfo)) - return nil 
- }, InType: reflect.TypeOf(&NodeSystemInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectFieldSelector).DeepCopyInto(out.(*ObjectFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ObjectFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMeta).DeepCopyInto(out.(*ObjectMeta)) - return nil - }, InType: reflect.TypeOf(&ObjectMeta{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectReference).DeepCopyInto(out.(*ObjectReference)) - return nil - }, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolume).DeepCopyInto(out.(*PersistentVolume)) - return nil - }, InType: reflect.TypeOf(&PersistentVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaim).DeepCopyInto(out.(*PersistentVolumeClaim)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimCondition).DeepCopyInto(out.(*PersistentVolumeClaimCondition)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimList).DeepCopyInto(out.(*PersistentVolumeClaimList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimSpec).DeepCopyInto(out.(*PersistentVolumeClaimSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimStatus).DeepCopyInto(out.(*PersistentVolumeClaimStatus)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimVolumeSource).DeepCopyInto(out.(*PersistentVolumeClaimVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeList).DeepCopyInto(out.(*PersistentVolumeList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSource).DeepCopyInto(out.(*PersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSpec).DeepCopyInto(out.(*PersistentVolumeSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeStatus).DeepCopyInto(out.(*PersistentVolumeStatus)) - return nil - }, InType: 
reflect.TypeOf(&PersistentVolumeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PhotonPersistentDiskVolumeSource).DeepCopyInto(out.(*PhotonPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Pod).DeepCopyInto(out.(*Pod)) - return nil - }, InType: reflect.TypeOf(&Pod{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinity).DeepCopyInto(out.(*PodAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinityTerm).DeepCopyInto(out.(*PodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&PodAffinityTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAntiAffinity).DeepCopyInto(out.(*PodAntiAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAntiAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAttachOptions).DeepCopyInto(out.(*PodAttachOptions)) - return nil - }, InType: reflect.TypeOf(&PodAttachOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodCondition).DeepCopyInto(out.(*PodCondition)) - return nil - }, InType: reflect.TypeOf(&PodCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodExecOptions).DeepCopyInto(out.(*PodExecOptions)) - return nil - }, InType: reflect.TypeOf(&PodExecOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodList).DeepCopyInto(out.(*PodList)) - return nil - }, InType: reflect.TypeOf(&PodList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodLogOptions).DeepCopyInto(out.(*PodLogOptions)) - return nil - }, InType: reflect.TypeOf(&PodLogOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPortForwardOptions).DeepCopyInto(out.(*PodPortForwardOptions)) - return nil - }, InType: reflect.TypeOf(&PodPortForwardOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodProxyOptions).DeepCopyInto(out.(*PodProxyOptions)) - return nil - }, InType: reflect.TypeOf(&PodProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityContext).DeepCopyInto(out.(*PodSecurityContext)) - return nil - }, InType: reflect.TypeOf(&PodSecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSignature).DeepCopyInto(out.(*PodSignature)) - return nil - }, InType: reflect.TypeOf(&PodSignature{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSpec).DeepCopyInto(out.(*PodSpec)) - return nil - }, InType: reflect.TypeOf(&PodSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*PodStatus).DeepCopyInto(out.(*PodStatus)) - return nil - }, InType: reflect.TypeOf(&PodStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodStatusResult).DeepCopyInto(out.(*PodStatusResult)) - return nil - }, InType: reflect.TypeOf(&PodStatusResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplate).DeepCopyInto(out.(*PodTemplate)) - return nil - }, InType: reflect.TypeOf(&PodTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateList).DeepCopyInto(out.(*PodTemplateList)) - return nil - }, InType: reflect.TypeOf(&PodTemplateList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateSpec).DeepCopyInto(out.(*PodTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&PodTemplateSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PortworxVolumeSource).DeepCopyInto(out.(*PortworxVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PortworxVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preconditions).DeepCopyInto(out.(*Preconditions)) - return nil - }, InType: reflect.TypeOf(&Preconditions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferAvoidPodsEntry).DeepCopyInto(out.(*PreferAvoidPodsEntry)) - return nil - }, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferredSchedulingTerm).DeepCopyInto(out.(*PreferredSchedulingTerm)) - return nil - }, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Probe).DeepCopyInto(out.(*Probe)) - return nil - }, InType: reflect.TypeOf(&Probe{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ProjectedVolumeSource).DeepCopyInto(out.(*ProjectedVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*QuobyteVolumeSource).DeepCopyInto(out.(*QuobyteVolumeSource)) - return nil - }, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RBDVolumeSource).DeepCopyInto(out.(*RBDVolumeSource)) - return nil - }, InType: reflect.TypeOf(&RBDVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RangeAllocation).DeepCopyInto(out.(*RangeAllocation)) - return nil - }, InType: reflect.TypeOf(&RangeAllocation{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationController).DeepCopyInto(out.(*ReplicationController)) - return nil - }, InType: reflect.TypeOf(&ReplicationController{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*ReplicationControllerCondition).DeepCopyInto(out.(*ReplicationControllerCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerList).DeepCopyInto(out.(*ReplicationControllerList)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerSpec).DeepCopyInto(out.(*ReplicationControllerSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerStatus).DeepCopyInto(out.(*ReplicationControllerStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceFieldSelector).DeepCopyInto(out.(*ResourceFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ResourceFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuota).DeepCopyInto(out.(*ResourceQuota)) - return nil - }, InType: reflect.TypeOf(&ResourceQuota{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaList).DeepCopyInto(out.(*ResourceQuotaList)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaSpec).DeepCopyInto(out.(*ResourceQuotaSpec)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaStatus).DeepCopyInto(out.(*ResourceQuotaStatus)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRequirements).DeepCopyInto(out.(*ResourceRequirements)) - return nil - }, InType: reflect.TypeOf(&ResourceRequirements{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxOptions).DeepCopyInto(out.(*SELinuxOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleIOVolumeSource).DeepCopyInto(out.(*ScaleIOVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Secret).DeepCopyInto(out.(*Secret)) - return nil - }, InType: reflect.TypeOf(&Secret{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretEnvSource).DeepCopyInto(out.(*SecretEnvSource)) - return nil - }, InType: reflect.TypeOf(&SecretEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretKeySelector).DeepCopyInto(out.(*SecretKeySelector)) - return nil - }, InType: reflect.TypeOf(&SecretKeySelector{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretList).DeepCopyInto(out.(*SecretList)) - return nil - }, InType: reflect.TypeOf(&SecretList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretProjection).DeepCopyInto(out.(*SecretProjection)) - return nil - }, InType: reflect.TypeOf(&SecretProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretReference).DeepCopyInto(out.(*SecretReference)) - return nil - }, InType: reflect.TypeOf(&SecretReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretVolumeSource).DeepCopyInto(out.(*SecretVolumeSource)) - return nil - }, InType: reflect.TypeOf(&SecretVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecurityContext).DeepCopyInto(out.(*SecurityContext)) - return nil - }, InType: reflect.TypeOf(&SecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SerializedReference).DeepCopyInto(out.(*SerializedReference)) - return nil - }, InType: reflect.TypeOf(&SerializedReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Service).DeepCopyInto(out.(*Service)) - return nil - }, InType: reflect.TypeOf(&Service{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccount).DeepCopyInto(out.(*ServiceAccount)) - return nil - }, InType: reflect.TypeOf(&ServiceAccount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccountList).DeepCopyInto(out.(*ServiceAccountList)) - return nil - }, InType: reflect.TypeOf(&ServiceAccountList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceList).DeepCopyInto(out.(*ServiceList)) - return nil - }, InType: reflect.TypeOf(&ServiceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServicePort).DeepCopyInto(out.(*ServicePort)) - return nil - }, InType: reflect.TypeOf(&ServicePort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceProxyOptions).DeepCopyInto(out.(*ServiceProxyOptions)) - return nil - }, InType: reflect.TypeOf(&ServiceProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceSpec).DeepCopyInto(out.(*ServiceSpec)) - return nil - }, InType: reflect.TypeOf(&ServiceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceStatus).DeepCopyInto(out.(*ServiceStatus)) - return nil - }, InType: reflect.TypeOf(&ServiceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SessionAffinityConfig).DeepCopyInto(out.(*SessionAffinityConfig)) - return nil - }, InType: reflect.TypeOf(&SessionAffinityConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*StorageOSPersistentVolumeSource).DeepCopyInto(out.(*StorageOSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSVolumeSource).DeepCopyInto(out.(*StorageOSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Sysctl).DeepCopyInto(out.(*Sysctl)) - return nil - }, InType: reflect.TypeOf(&Sysctl{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TCPSocketAction).DeepCopyInto(out.(*TCPSocketAction)) - return nil - }, InType: reflect.TypeOf(&TCPSocketAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Taint).DeepCopyInto(out.(*Taint)) - return nil - }, InType: reflect.TypeOf(&Taint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Toleration).DeepCopyInto(out.(*Toleration)) - return nil - }, InType: reflect.TypeOf(&Toleration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Volume).DeepCopyInto(out.(*Volume)) - return nil - }, InType: reflect.TypeOf(&Volume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeMount).DeepCopyInto(out.(*VolumeMount)) - return nil - }, InType: reflect.TypeOf(&VolumeMount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeProjection).DeepCopyInto(out.(*VolumeProjection)) - return nil - }, InType: reflect.TypeOf(&VolumeProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeSource).DeepCopyInto(out.(*VolumeSource)) - return nil - }, InType: reflect.TypeOf(&VolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VsphereVirtualDiskVolumeSource).DeepCopyInto(out.(*VsphereVirtualDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*WeightedPodAffinityTerm).DeepCopyInto(out.(*WeightedPodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, - ) -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource. -func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource { - if in == nil { - return nil - } - out := new(AWSElasticBlockStoreVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Affinity) DeepCopyInto(out *Affinity) { - *out = *in - if in.NodeAffinity != nil { - in, out := &in.NodeAffinity, &out.NodeAffinity - if *in == nil { - *out = nil - } else { - *out = new(NodeAffinity) - (*in).DeepCopyInto(*out) - } - } - if in.PodAffinity != nil { - in, out := &in.PodAffinity, &out.PodAffinity - if *in == nil { - *out = nil - } else { - *out = new(PodAffinity) - (*in).DeepCopyInto(*out) - } - } - if in.PodAntiAffinity != nil { - in, out := &in.PodAntiAffinity, &out.PodAntiAffinity - if *in == nil { - *out = nil - } else { - *out = new(PodAntiAffinity) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity. -func (in *Affinity) DeepCopy() *Affinity { - if in == nil { - return nil - } - out := new(Affinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume. -func (in *AttachedVolume) DeepCopy() *AttachedVolume { - if in == nil { - return nil - } - out := new(AttachedVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AvoidPods) DeepCopyInto(out *AvoidPods) { - *out = *in - if in.PreferAvoidPods != nil { - in, out := &in.PreferAvoidPods, &out.PreferAvoidPods - *out = make([]PreferAvoidPodsEntry, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods. -func (in *AvoidPods) DeepCopy() *AvoidPods { - if in == nil { - return nil - } - out := new(AvoidPods) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { - *out = *in - if in.CachingMode != nil { - in, out := &in.CachingMode, &out.CachingMode - if *in == nil { - *out = nil - } else { - *out = new(AzureDataDiskCachingMode) - **out = **in - } - } - if in.FSType != nil { - in, out := &in.FSType, &out.FSType - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - if in.ReadOnly != nil { - in, out := &in.ReadOnly, &out.ReadOnly - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.Kind != nil { - in, out := &in.Kind, &out.Kind - if *in == nil { - *out = nil - } else { - *out = new(AzureDataDiskKind) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource. -func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource { - if in == nil { - return nil - } - out := new(AzureDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
[Removed: autogenerated DeepCopyInto/DeepCopy/DeepCopyObject functions for the vendored Kubernetes core v1 API types, from AzureFilePersistentVolumeSource through PersistentVolume; the generated code is omitted here.]
-func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim. -func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim { - if in == nil { - return nil - } - out := new(PersistentVolumeClaim) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition. -func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PersistentVolumeClaim, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList. -func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) { - *out = *in - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if *in == nil { - *out = nil - } else { - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - } - in.Resources.DeepCopyInto(&out.Resources) - if in.StorageClassName != nil { - in, out := &in.StorageClassName, &out.StorageClassName - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec. 
-func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) { - *out = *in - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]PersistentVolumeClaimCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus. -func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource. -func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PersistentVolume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList. -func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList { - if in == nil { - return nil - } - out := new(PersistentVolumeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PersistentVolumeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { - *out = *in - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - if *in == nil { - *out = nil - } else { - *out = new(GCEPersistentDiskVolumeSource) - **out = **in - } - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - if *in == nil { - *out = nil - } else { - *out = new(AWSElasticBlockStoreVolumeSource) - **out = **in - } - } - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - if *in == nil { - *out = nil - } else { - *out = new(HostPathVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - if *in == nil { - *out = nil - } else { - *out = new(GlusterfsVolumeSource) - **out = **in - } - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - if *in == nil { - *out = nil - } else { - *out = new(NFSVolumeSource) - **out = **in - } - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - if *in == nil { - *out = nil - } else { - *out = new(RBDVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Quobyte != nil { - in, out := &in.Quobyte, &out.Quobyte - if *in == nil { - *out = nil - } else { - *out = new(QuobyteVolumeSource) - **out = **in - } - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - if *in == nil { - *out = nil - } else { - *out = new(ISCSIVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - if *in == nil { - *out = nil - } else { - *out = new(FlexVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - if *in == nil { - *out = nil - } else { - *out = new(CinderVolumeSource) - **out = **in - } - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - if *in == nil { - *out = nil - } else { - *out = new(CephFSPersistentVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.FC != nil { - in, out := &in.FC, &out.FC - if *in == nil { - *out = nil - } else { - *out = new(FCVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - if *in == nil { - *out = nil - } else { - *out = new(FlockerVolumeSource) - **out = **in - } - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - if *in == nil { - *out = nil - } else { - *out = new(AzureFilePersistentVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - if *in == nil { - *out = nil - } else { - *out = new(VsphereVirtualDiskVolumeSource) - **out = **in - } - } - if in.AzureDisk != nil { - in, out := &in.AzureDisk, &out.AzureDisk - if *in == nil { - *out = nil - } else { - *out = new(AzureDiskVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.PhotonPersistentDisk != nil { - in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk - if *in == nil { - *out = nil - } else { - *out = new(PhotonPersistentDiskVolumeSource) - **out = **in - } - } - if in.PortworxVolume != nil { - in, out := &in.PortworxVolume, &out.PortworxVolume - if *in == nil { - *out = nil - } else { - *out = new(PortworxVolumeSource) - **out = **in - } - } - if in.ScaleIO != nil { - in, out := &in.ScaleIO, &out.ScaleIO - if *in == nil { - *out = nil - } else { - *out = new(ScaleIOVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Local != nil { - in, out := &in.Local, &out.Local - if *in == nil { 
- *out = nil - } else { - *out = new(LocalVolumeSource) - **out = **in - } - } - if in.StorageOS != nil { - in, out := &in.StorageOS, &out.StorageOS - if *in == nil { - *out = nil - } else { - *out = new(StorageOSPersistentVolumeSource) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource. -func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource { - if in == nil { - return nil - } - out := new(PersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { - *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource) - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.ClaimRef != nil { - in, out := &in.ClaimRef, &out.ClaimRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectReference) - **out = **in - } - } - if in.MountOptions != nil { - in, out := &in.MountOptions, &out.MountOptions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec. -func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec { - if in == nil { - return nil - } - out := new(PersistentVolumeSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus. -func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus { - if in == nil { - return nil - } - out := new(PersistentVolumeStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource. -func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource { - if in == nil { - return nil - } - out := new(PhotonPersistentDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Pod) DeepCopyInto(out *Pod) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. -func (in *Pod) DeepCopy() *Pod { - if in == nil { - return nil - } - out := new(Pod) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *Pod) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodAffinity) DeepCopyInto(out *PodAffinity) { - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity. -func (in *PodAffinity) DeepCopy() *PodAffinity { - if in == nil { - return nil - } - out := new(PodAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { - *out = *in - if in.LabelSelector != nil { - in, out := &in.LabelSelector, &out.LabelSelector - if *in == nil { - *out = nil - } else { - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - } - if in.Namespaces != nil { - in, out := &in.Namespaces, &out.Namespaces - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm. -func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm { - if in == nil { - return nil - } - out := new(PodAffinityTerm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) { - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity. -func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity { - if in == nil { - return nil - } - out := new(PodAntiAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions. 
-func (in *PodAttachOptions) DeepCopy() *PodAttachOptions { - if in == nil { - return nil - } - out := new(PodAttachOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodAttachOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodCondition) DeepCopyInto(out *PodCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition. -func (in *PodCondition) DeepCopy() *PodCondition { - if in == nil { - return nil - } - out := new(PodCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions. -func (in *PodExecOptions) DeepCopy() *PodExecOptions { - if in == nil { - return nil - } - out := new(PodExecOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodExecOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodList) DeepCopyInto(out *PodList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Pod, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList. -func (in *PodList) DeepCopy() *PodList { - if in == nil { - return nil - } - out := new(PodList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.SinceSeconds != nil { - in, out := &in.SinceSeconds, &out.SinceSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.SinceTime != nil { - in, out := &in.SinceTime, &out.SinceTime - if *in == nil { - *out = nil - } else { - *out = new(v1.Time) - (*in).DeepCopyInto(*out) - } - } - if in.TailLines != nil { - in, out := &in.TailLines, &out.TailLines - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.LimitBytes != nil { - in, out := &in.LimitBytes, &out.LimitBytes - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions. -func (in *PodLogOptions) DeepCopy() *PodLogOptions { - if in == nil { - return nil - } - out := new(PodLogOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodLogOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]int32, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions. -func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions { - if in == nil { - return nil - } - out := new(PodPortForwardOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions. -func (in *PodProxyOptions) DeepCopy() *PodProxyOptions { - if in == nil { - return nil - } - out := new(PodProxyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodProxyOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - if *in == nil { - *out = nil - } else { - *out = new(SELinuxOptions) - **out = **in - } - } - if in.RunAsUser != nil { - in, out := &in.RunAsUser, &out.RunAsUser - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.RunAsNonRoot != nil { - in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.SupplementalGroups != nil { - in, out := &in.SupplementalGroups, &out.SupplementalGroups - *out = make([]int64, len(*in)) - copy(*out, *in) - } - if in.FSGroup != nil { - in, out := &in.FSGroup, &out.FSGroup - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext. -func (in *PodSecurityContext) DeepCopy() *PodSecurityContext { - if in == nil { - return nil - } - out := new(PodSecurityContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSignature) DeepCopyInto(out *PodSignature) { - *out = *in - if in.PodController != nil { - in, out := &in.PodController, &out.PodController - if *in == nil { - *out = nil - } else { - *out = new(v1.OwnerReference) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature. -func (in *PodSignature) DeepCopy() *PodSignature { - if in == nil { - return nil - } - out := new(PodSignature) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSpec) DeepCopyInto(out *PodSpec) { - *out = *in - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.InitContainers != nil { - in, out := &in.InitContainers, &out.InitContainers - *out = make([]Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.TerminationGracePeriodSeconds != nil { - in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - if *in == nil { - *out = nil - } else { - *out = new(PodSecurityContext) - (*in).DeepCopyInto(*out) - } - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.Affinity != nil { - in, out := &in.Affinity, &out.Affinity - if *in == nil { - *out = nil - } else { - *out = new(Affinity) - (*in).DeepCopyInto(*out) - } - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.HostAliases != nil { - in, out := &in.HostAliases, &out.HostAliases - *out = make([]HostAlias, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Priority != nil { - in, out := &in.Priority, &out.Priority - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec. -func (in *PodSpec) DeepCopy() *PodSpec { - if in == nil { - return nil - } - out := new(PodSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodStatus) DeepCopyInto(out *PodStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]PodCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - if *in == nil { - *out = nil - } else { - *out = new(v1.Time) - (*in).DeepCopyInto(*out) - } - } - if in.InitContainerStatuses != nil { - in, out := &in.InitContainerStatuses, &out.InitContainerStatuses - *out = make([]ContainerStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ContainerStatuses != nil { - in, out := &in.ContainerStatuses, &out.ContainerStatuses - *out = make([]ContainerStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus. -func (in *PodStatus) DeepCopy() *PodStatus { - if in == nil { - return nil - } - out := new(PodStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult. -func (in *PodStatusResult) DeepCopy() *PodStatusResult { - if in == nil { - return nil - } - out := new(PodStatusResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodStatusResult) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodTemplate) DeepCopyInto(out *PodTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate. -func (in *PodTemplate) DeepCopy() *PodTemplate { - if in == nil { - return nil - } - out := new(PodTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodTemplate, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList. 
-func (in *PodTemplateList) DeepCopy() *PodTemplateList { - if in == nil { - return nil - } - out := new(PodTemplateList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodTemplateList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec. -func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec { - if in == nil { - return nil - } - out := new(PodTemplateSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource. -func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource { - if in == nil { - return nil - } - out := new(PortworxVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Preconditions) DeepCopyInto(out *Preconditions) { - *out = *in - if in.UID != nil { - in, out := &in.UID, &out.UID - if *in == nil { - *out = nil - } else { - *out = new(types.UID) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions. -func (in *Preconditions) DeepCopy() *Preconditions { - if in == nil { - return nil - } - out := new(Preconditions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) { - *out = *in - in.PodSignature.DeepCopyInto(&out.PodSignature) - in.EvictionTime.DeepCopyInto(&out.EvictionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry. -func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry { - if in == nil { - return nil - } - out := new(PreferAvoidPodsEntry) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) { - *out = *in - in.Preference.DeepCopyInto(&out.Preference) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm. -func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm { - if in == nil { - return nil - } - out := new(PreferredSchedulingTerm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Probe) DeepCopyInto(out *Probe) { - *out = *in - in.Handler.DeepCopyInto(&out.Handler) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. -func (in *Probe) DeepCopy() *Probe { - if in == nil { - return nil - } - out := new(Probe) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) { - *out = *in - if in.Sources != nil { - in, out := &in.Sources, &out.Sources - *out = make([]VolumeProjection, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource. -func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource { - if in == nil { - return nil - } - out := new(ProjectedVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource. -func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource { - if in == nil { - return nil - } - out := new(QuobyteVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) { - *out = *in - if in.CephMonitors != nil { - in, out := &in.CephMonitors, &out.CephMonitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource. -func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource { - if in == nil { - return nil - } - out := new(RBDVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation. -func (in *RangeAllocation) DeepCopy() *RangeAllocation { - if in == nil { - return nil - } - out := new(RangeAllocation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RangeAllocation) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ReplicationController) DeepCopyInto(out *ReplicationController) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController. -func (in *ReplicationController) DeepCopy() *ReplicationController { - if in == nil { - return nil - } - out := new(ReplicationController) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicationController) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition. -func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition { - if in == nil { - return nil - } - out := new(ReplicationControllerCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicationController, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList. -func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList { - if in == nil { - return nil - } - out := new(ReplicationControllerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicationControllerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Template != nil { - in, out := &in.Template, &out.Template - if *in == nil { - *out = nil - } else { - *out = new(PodTemplateSpec) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec. -func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec { - if in == nil { - return nil - } - out := new(ReplicationControllerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ReplicationControllerCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus. -func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus { - if in == nil { - return nil - } - out := new(ReplicationControllerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) { - *out = *in - out.Divisor = in.Divisor.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector. -func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector { - if in == nil { - return nil - } - out := new(ResourceFieldSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota. -func (in *ResourceQuota) DeepCopy() *ResourceQuota { - if in == nil { - return nil - } - out := new(ResourceQuota) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceQuota) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceQuota, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList. -func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList { - if in == nil { - return nil - } - out := new(ResourceQuotaList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceQuotaList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) {
-	*out = *in
-	if in.Hard != nil {
-		in, out := &in.Hard, &out.Hard
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.Scopes != nil {
-		in, out := &in.Scopes, &out.Scopes
-		*out = make([]ResourceQuotaScope, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec.
-func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceQuotaSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) {
-	*out = *in
-	if in.Hard != nil {
-		in, out := &in.Hard, &out.Hard
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.Used != nil {
-		in, out := &in.Used, &out.Used
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus.
-func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceQuotaStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
-	*out = *in
-	if in.Limits != nil {
-		in, out := &in.Limits, &out.Limits
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.Requests != nil {
-		in, out := &in.Requests, &out.Requests
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements.
-func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceRequirements)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions.
-func (in *SELinuxOptions) DeepCopy() *SELinuxOptions {
-	if in == nil {
-		return nil
-	}
-	out := new(SELinuxOptions)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) {
-	*out = *in
-	if in.SecretRef != nil {
-		in, out := &in.SecretRef, &out.SecretRef
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(LocalObjectReference)
-			**out = **in
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource.
-func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(ScaleIOVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Secret) DeepCopyInto(out *Secret) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	if in.Data != nil {
-		in, out := &in.Data, &out.Data
-		*out = make(map[string][]byte, len(*in))
-		for key, val := range *in {
-			if val == nil {
-				(*out)[key] = nil
-			} else {
-				(*out)[key] = make([]byte, len(val))
-				copy((*out)[key], val)
-			}
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret.
-func (in *Secret) DeepCopy() *Secret {
-	if in == nil {
-		return nil
-	}
-	out := new(Secret)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Secret) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	} else {
-		return nil
-	}
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) {
-	*out = *in
-	out.LocalObjectReference = in.LocalObjectReference
-	if in.Optional != nil {
-		in, out := &in.Optional, &out.Optional
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource.
-func (in *SecretEnvSource) DeepCopy() *SecretEnvSource {
-	if in == nil {
-		return nil
-	}
-	out := new(SecretEnvSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) {
-	*out = *in
-	out.LocalObjectReference = in.LocalObjectReference
-	if in.Optional != nil {
-		in, out := &in.Optional, &out.Optional
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector.
-func (in *SecretKeySelector) DeepCopy() *SecretKeySelector {
-	if in == nil {
-		return nil
-	}
-	out := new(SecretKeySelector)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SecretList) DeepCopyInto(out *SecretList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	out.ListMeta = in.ListMeta
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Secret, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList.
-func (in *SecretList) DeepCopy() *SecretList {
-	if in == nil {
-		return nil
-	}
-	out := new(SecretList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *SecretList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	} else {
-		return nil
-	}
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SecretProjection) DeepCopyInto(out *SecretProjection) {
-	*out = *in
-	out.LocalObjectReference = in.LocalObjectReference
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]KeyToPath, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Optional != nil {
-		in, out := &in.Optional, &out.Optional
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection.
-func (in *SecretProjection) DeepCopy() *SecretProjection {
-	if in == nil {
-		return nil
-	}
-	out := new(SecretProjection)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SecretReference) DeepCopyInto(out *SecretReference) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
-func (in *SecretReference) DeepCopy() *SecretReference {
-	if in == nil {
-		return nil
-	}
-	out := new(SecretReference)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) {
-	*out = *in
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]KeyToPath, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.DefaultMode != nil {
-		in, out := &in.DefaultMode, &out.DefaultMode
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(int32)
-			**out = **in
-		}
-	}
-	if in.Optional != nil {
-		in, out := &in.Optional, &out.Optional
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource.
-func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(SecretVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SecurityContext) DeepCopyInto(out *SecurityContext) {
-	*out = *in
-	if in.Capabilities != nil {
-		in, out := &in.Capabilities, &out.Capabilities
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(Capabilities)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.Privileged != nil {
-		in, out := &in.Privileged, &out.Privileged
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
-	}
-	if in.SELinuxOptions != nil {
-		in, out := &in.SELinuxOptions, &out.SELinuxOptions
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(SELinuxOptions)
-			**out = **in
-		}
-	}
-	if in.RunAsUser != nil {
-		in, out := &in.RunAsUser, &out.RunAsUser
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(int64)
-			**out = **in
-		}
-	}
-	if in.RunAsNonRoot != nil {
-		in, out := &in.RunAsNonRoot, &out.RunAsNonRoot
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
-	}
-	if in.ReadOnlyRootFilesystem != nil {
-		in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
-	}
-	if in.AllowPrivilegeEscalation != nil {
-		in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext.
-func (in *SecurityContext) DeepCopy() *SecurityContext {
-	if in == nil {
-		return nil
-	}
-	out := new(SecurityContext)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SerializedReference) DeepCopyInto(out *SerializedReference) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	out.Reference = in.Reference
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference.
-func (in *SerializedReference) DeepCopy() *SerializedReference {
-	if in == nil {
-		return nil
-	}
-	out := new(SerializedReference)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *SerializedReference) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	} else {
-		return nil
-	}
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Service) DeepCopyInto(out *Service) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service.
-func (in *Service) DeepCopy() *Service {
-	if in == nil {
-		return nil
-	}
-	out := new(Service)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Service) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	} else {
-		return nil
-	}
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	if in.Secrets != nil {
-		in, out := &in.Secrets, &out.Secrets
-		*out = make([]ObjectReference, len(*in))
-		copy(*out, *in)
-	}
-	if in.ImagePullSecrets != nil {
-		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
-		*out = make([]LocalObjectReference, len(*in))
-		copy(*out, *in)
-	}
-	if in.AutomountServiceAccountToken != nil {
-		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(bool)
-			**out = **in
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount.
-func (in *ServiceAccount) DeepCopy() *ServiceAccount {
-	if in == nil {
-		return nil
-	}
-	out := new(ServiceAccount)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ServiceAccount) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	} else {
-		return nil
-	}
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	out.ListMeta = in.ListMeta
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]ServiceAccount, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList.
-func (in *ServiceAccountList) DeepCopy() *ServiceAccountList {
-	if in == nil {
-		return nil
-	}
-	out := new(ServiceAccountList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ServiceAccountList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	} else {
-		return nil
-	}
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceList) DeepCopyInto(out *ServiceList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	out.ListMeta = in.ListMeta
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Service, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList.
-func (in *ServiceList) DeepCopy() *ServiceList {
-	if in == nil {
-		return nil
-	}
-	out := new(ServiceList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ServiceList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	} else {
-		return nil
-	}
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServicePort) DeepCopyInto(out *ServicePort) {
-	*out = *in
-	out.TargetPort = in.TargetPort
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort.
-func (in *ServicePort) DeepCopy() *ServicePort {
-	if in == nil {
-		return nil
-	}
-	out := new(ServicePort)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions.
-func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions {
-	if in == nil {
-		return nil
-	}
-	out := new(ServiceProxyOptions)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	} else {
-		return nil
-	}
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
-	*out = *in
-	if in.Ports != nil {
-		in, out := &in.Ports, &out.Ports
-		*out = make([]ServicePort, len(*in))
-		copy(*out, *in)
-	}
-	if in.Selector != nil {
-		in, out := &in.Selector, &out.Selector
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.ExternalIPs != nil {
-		in, out := &in.ExternalIPs, &out.ExternalIPs
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.SessionAffinityConfig != nil {
-		in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(SessionAffinityConfig)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.LoadBalancerSourceRanges != nil {
-		in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec.
-func (in *ServiceSpec) DeepCopy() *ServiceSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ServiceSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) {
-	*out = *in
-	in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus.
-func (in *ServiceStatus) DeepCopy() *ServiceStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(ServiceStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) {
-	*out = *in
-	if in.ClientIP != nil {
-		in, out := &in.ClientIP, &out.ClientIP
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ClientIPConfig)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig.
-func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig {
-	if in == nil {
-		return nil
-	}
-	out := new(SessionAffinityConfig)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) {
-	*out = *in
-	if in.SecretRef != nil {
-		in, out := &in.SecretRef, &out.SecretRef
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ObjectReference)
-			**out = **in
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource.
-func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(StorageOSPersistentVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) {
-	*out = *in
-	if in.SecretRef != nil {
-		in, out := &in.SecretRef, &out.SecretRef
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(LocalObjectReference)
-			**out = **in
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource.
-func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(StorageOSVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Sysctl) DeepCopyInto(out *Sysctl) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl.
-func (in *Sysctl) DeepCopy() *Sysctl {
-	if in == nil {
-		return nil
-	}
-	out := new(Sysctl)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) {
-	*out = *in
-	out.Port = in.Port
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction.
-func (in *TCPSocketAction) DeepCopy() *TCPSocketAction {
-	if in == nil {
-		return nil
-	}
-	out := new(TCPSocketAction)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Taint) DeepCopyInto(out *Taint) {
-	*out = *in
-	in.TimeAdded.DeepCopyInto(&out.TimeAdded)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint.
-func (in *Taint) DeepCopy() *Taint {
-	if in == nil {
-		return nil
-	}
-	out := new(Taint)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Toleration) DeepCopyInto(out *Toleration) {
-	*out = *in
-	if in.TolerationSeconds != nil {
-		in, out := &in.TolerationSeconds, &out.TolerationSeconds
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(int64)
-			**out = **in
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration.
-func (in *Toleration) DeepCopy() *Toleration {
-	if in == nil {
-		return nil
-	}
-	out := new(Toleration)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Volume) DeepCopyInto(out *Volume) {
-	*out = *in
-	in.VolumeSource.DeepCopyInto(&out.VolumeSource)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume.
-func (in *Volume) DeepCopy() *Volume {
-	if in == nil {
-		return nil
-	}
-	out := new(Volume)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *VolumeMount) DeepCopyInto(out *VolumeMount) {
-	*out = *in
-	if in.MountPropagation != nil {
-		in, out := &in.MountPropagation, &out.MountPropagation
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(MountPropagationMode)
-			**out = **in
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount.
-func (in *VolumeMount) DeepCopy() *VolumeMount {
-	if in == nil {
-		return nil
-	}
-	out := new(VolumeMount)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
-	*out = *in
-	if in.Secret != nil {
-		in, out := &in.Secret, &out.Secret
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(SecretProjection)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.DownwardAPI != nil {
-		in, out := &in.DownwardAPI, &out.DownwardAPI
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(DownwardAPIProjection)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.ConfigMap != nil {
-		in, out := &in.ConfigMap, &out.ConfigMap
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ConfigMapProjection)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection.
-func (in *VolumeProjection) DeepCopy() *VolumeProjection {
-	if in == nil {
-		return nil
-	}
-	out := new(VolumeProjection)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
-	*out = *in
-	if in.HostPath != nil {
-		in, out := &in.HostPath, &out.HostPath
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(HostPathVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.EmptyDir != nil {
-		in, out := &in.EmptyDir, &out.EmptyDir
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(EmptyDirVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.GCEPersistentDisk != nil {
-		in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(GCEPersistentDiskVolumeSource)
-			**out = **in
-		}
-	}
-	if in.AWSElasticBlockStore != nil {
-		in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(AWSElasticBlockStoreVolumeSource)
-			**out = **in
-		}
-	}
-	if in.GitRepo != nil {
-		in, out := &in.GitRepo, &out.GitRepo
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(GitRepoVolumeSource)
-			**out = **in
-		}
-	}
-	if in.Secret != nil {
-		in, out := &in.Secret, &out.Secret
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(SecretVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.NFS != nil {
-		in, out := &in.NFS, &out.NFS
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(NFSVolumeSource)
-			**out = **in
-		}
-	}
-	if in.ISCSI != nil {
-		in, out := &in.ISCSI, &out.ISCSI
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ISCSIVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.Glusterfs != nil {
-		in, out := &in.Glusterfs, &out.Glusterfs
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(GlusterfsVolumeSource)
-			**out = **in
-		}
-	}
-	if in.PersistentVolumeClaim != nil {
-		in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(PersistentVolumeClaimVolumeSource)
-			**out = **in
-		}
-	}
-	if in.RBD != nil {
-		in, out := &in.RBD, &out.RBD
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(RBDVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.Quobyte != nil {
-		in, out := &in.Quobyte, &out.Quobyte
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(QuobyteVolumeSource)
-			**out = **in
-		}
-	}
-	if in.FlexVolume != nil {
-		in, out := &in.FlexVolume, &out.FlexVolume
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(FlexVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.Cinder != nil {
-		in, out := &in.Cinder, &out.Cinder
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(CinderVolumeSource)
-			**out = **in
-		}
-	}
-	if in.CephFS != nil {
-		in, out := &in.CephFS, &out.CephFS
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(CephFSVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.Flocker != nil {
-		in, out := &in.Flocker, &out.Flocker
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(FlockerVolumeSource)
-			**out = **in
-		}
-	}
-	if in.DownwardAPI != nil {
-		in, out := &in.DownwardAPI, &out.DownwardAPI
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(DownwardAPIVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.FC != nil {
-		in, out := &in.FC, &out.FC
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(FCVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.AzureFile != nil {
-		in, out := &in.AzureFile, &out.AzureFile
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(AzureFileVolumeSource)
-			**out = **in
-		}
-	}
-	if in.ConfigMap != nil {
-		in, out := &in.ConfigMap, &out.ConfigMap
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ConfigMapVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.VsphereVolume != nil {
-		in, out := &in.VsphereVolume, &out.VsphereVolume
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(VsphereVirtualDiskVolumeSource)
-			**out = **in
-		}
-	}
-	if in.AzureDisk != nil {
-		in, out := &in.AzureDisk, &out.AzureDisk
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(AzureDiskVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.PhotonPersistentDisk != nil {
-		in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(PhotonPersistentDiskVolumeSource)
-			**out = **in
-		}
-	}
-	if in.Projected != nil {
-		in, out := &in.Projected, &out.Projected
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ProjectedVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.PortworxVolume != nil {
-		in, out := &in.PortworxVolume, &out.PortworxVolume
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(PortworxVolumeSource)
-			**out = **in
-		}
-	}
-	if in.ScaleIO != nil {
-		in, out := &in.ScaleIO, &out.ScaleIO
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ScaleIOVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	if in.StorageOS != nil {
-		in, out := &in.StorageOS, &out.StorageOS
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(StorageOSVolumeSource)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource.
-func (in *VolumeSource) DeepCopy() *VolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(VolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource.
-func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(VsphereVirtualDiskVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) {
-	*out = *in
-	in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm.
-func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm {
-	if in == nil {
-		return nil
-	}
-	out := new(WeightedPodAffinityTerm)
-	in.DeepCopyInto(out)
-	return out
-}