From affb56f9a083acf1591af1cd4217740b4483cca4 Mon Sep 17 00:00:00 2001 From: Gabriel Adrian Samfira Date: Mon, 18 Dec 2023 12:16:48 +0000 Subject: [PATCH 1/4] Remove the LXD internal provider Canonical have relicensed the LXD project to AGPLv3. This means that we can no longer update the go LXD client without re-licensing GARM as AGPLv3. This is not desirable or possible. The existing code seems to be Apache 2.0 and all code that has already been contributed seems to stay as Apache 2.0, but new contributions from Canonical employees will be AGPLv3. We cannot risk including AGPLv3 code now or in the future, so we will separate the LXD provider into its own project which can be AGPLv3. GARM will simply execute the external provider. If the client code of LXD will ever be split from the main project and re-licensed as Apache 2.0 or a compatible license, we will reconsider adding it back as a native provider. Although in the long run, I believe external providers will be the only option as they are easier to write, easier to maintain and safer to ship (a bug in the provider does not crash GARM itself). 
Signed-off-by: Gabriel Adrian Samfira --- config/config.go | 5 - config/config_test.go | 11 +- config/lxd.go | 163 - config/lxd_test.go | 175 -- go.mod | 20 +- go.sum | 132 +- runner/providers/lxd/images.go | 89 - runner/providers/lxd/lxd.go | 530 ---- runner/providers/lxd/specs.go | 40 - runner/providers/lxd/util.go | 234 -- runner/providers/providers.go | 10 +- test/integration/config/config.toml | 27 +- .../integration/config/garm-provider-lxd.toml | 21 + test/integration/scripts/setup-garm.sh | 9 + .../cloudconfig/cloudconfig.go | 182 -- .../cloudconfig/templates.go | 539 ---- .../garm-provider-common/cloudconfig/util.go | 207 -- .../github.com/flosch/pongo2/.gitattributes | 1 - vendor/github.com/flosch/pongo2/.gitignore | 41 - vendor/github.com/flosch/pongo2/.travis.yml | 8 - vendor/github.com/flosch/pongo2/AUTHORS | 11 - vendor/github.com/flosch/pongo2/LICENSE | 20 - vendor/github.com/flosch/pongo2/README.md | 167 - vendor/github.com/flosch/pongo2/context.go | 137 - vendor/github.com/flosch/pongo2/doc.go | 31 - vendor/github.com/flosch/pongo2/error.go | 91 - vendor/github.com/flosch/pongo2/filters.go | 141 - .../flosch/pongo2/filters_builtin.go | 927 ------ vendor/github.com/flosch/pongo2/helpers.go | 15 - vendor/github.com/flosch/pongo2/lexer.go | 432 --- vendor/github.com/flosch/pongo2/nodes.go | 16 - vendor/github.com/flosch/pongo2/nodes_html.go | 23 - .../github.com/flosch/pongo2/nodes_wrapper.go | 16 - vendor/github.com/flosch/pongo2/options.go | 26 - vendor/github.com/flosch/pongo2/parser.go | 309 -- .../flosch/pongo2/parser_document.go | 59 - .../flosch/pongo2/parser_expression.go | 517 ---- vendor/github.com/flosch/pongo2/pongo2.go | 14 - vendor/github.com/flosch/pongo2/tags.go | 133 - .../flosch/pongo2/tags_autoescape.go | 52 - vendor/github.com/flosch/pongo2/tags_block.go | 129 - .../github.com/flosch/pongo2/tags_comment.go | 27 - vendor/github.com/flosch/pongo2/tags_cycle.go | 106 - .../github.com/flosch/pongo2/tags_extends.go | 52 - 
.../github.com/flosch/pongo2/tags_filter.go | 95 - .../github.com/flosch/pongo2/tags_firstof.go | 49 - vendor/github.com/flosch/pongo2/tags_for.go | 159 - vendor/github.com/flosch/pongo2/tags_if.go | 76 - .../flosch/pongo2/tags_ifchanged.go | 116 - .../github.com/flosch/pongo2/tags_ifequal.go | 78 - .../flosch/pongo2/tags_ifnotequal.go | 78 - .../github.com/flosch/pongo2/tags_import.go | 84 - .../github.com/flosch/pongo2/tags_include.go | 146 - vendor/github.com/flosch/pongo2/tags_lorem.go | 132 - vendor/github.com/flosch/pongo2/tags_macro.go | 149 - vendor/github.com/flosch/pongo2/tags_now.go | 50 - vendor/github.com/flosch/pongo2/tags_set.go | 50 - .../flosch/pongo2/tags_spaceless.go | 54 - vendor/github.com/flosch/pongo2/tags_ssi.go | 68 - .../flosch/pongo2/tags_templatetag.go | 45 - .../flosch/pongo2/tags_widthratio.go | 83 - vendor/github.com/flosch/pongo2/tags_with.go | 88 - vendor/github.com/flosch/pongo2/template.go | 276 -- .../flosch/pongo2/template_loader.go | 156 - .../github.com/flosch/pongo2/template_sets.go | 305 -- vendor/github.com/flosch/pongo2/value.go | 540 ---- vendor/github.com/flosch/pongo2/variable.go | 693 ----- .../macaroon-bakery/v3/LICENSE | 187 -- .../macaroon-bakery/v3/bakery/bakery.go | 97 - .../macaroon-bakery/v3/bakery/checker.go | 503 ---- .../v3/bakery/checkers/checkers.go | 246 -- .../v3/bakery/checkers/declared.go | 137 - .../v3/bakery/checkers/namespace.go | 214 -- .../v3/bakery/checkers/time.go | 97 - .../macaroon-bakery/v3/bakery/codec.go | 381 --- .../macaroon-bakery/v3/bakery/discharge.go | 282 -- .../macaroon-bakery/v3/bakery/dischargeall.go | 56 - .../macaroon-bakery/v3/bakery/doc.go | 88 - .../macaroon-bakery/v3/bakery/error.go | 77 - .../macaroon-bakery/v3/bakery/keys.go | 219 -- .../macaroon-bakery/v3/bakery/logger.go | 28 - .../macaroon-bakery/v3/bakery/macaroon.go | 356 --- .../macaroon-bakery/v3/bakery/oven.go | 359 --- .../macaroon-bakery/v3/bakery/slice.go | 134 - .../macaroon-bakery/v3/bakery/store.go | 63 - 
.../macaroon-bakery/v3/bakery/version.go | 30 - .../macaroon-bakery/v3/httpbakery/browser.go | 200 -- .../macaroon-bakery/v3/httpbakery/checkers.go | 157 - .../macaroon-bakery/v3/httpbakery/client.go | 727 ----- .../v3/httpbakery/context_go17.go | 12 - .../v3/httpbakery/context_prego17.go | 12 - .../v3/httpbakery/discharge.go | 367 --- .../httpbakery/dischargeclient_generated.go | 35 - .../macaroon-bakery/v3/httpbakery/error.go | 359 --- .../macaroon-bakery/v3/httpbakery/keyring.go | 113 - .../macaroon-bakery/v3/httpbakery/oven.go | 88 - .../macaroon-bakery/v3/httpbakery/request.go | 197 -- .../macaroon-bakery/v3/httpbakery/visitor.go | 68 - .../v3/internal/httputil/relativeurl.go | 64 - .../go-macaroon-bakery/macaroonpb/LICENSE | 187 -- .../go-macaroon-bakery/macaroonpb/README.md | 13 - .../go-macaroon-bakery/macaroonpb/id.go | 19 - .../go-macaroon-bakery/macaroonpb/id.pb.go | 238 -- .../go-macaroon-bakery/macaroonpb/id.proto | 14 - vendor/github.com/juju/webbrowser/.gitignore | 24 - vendor/github.com/juju/webbrowser/LICENSE | 165 - vendor/github.com/juju/webbrowser/README.md | 2 - .../github.com/juju/webbrowser/webbrowser.go | 61 - .../julienschmidt/httprouter/.travis.yml | 18 - .../julienschmidt/httprouter/LICENSE | 29 - .../julienschmidt/httprouter/README.md | 300 -- .../julienschmidt/httprouter/path.go | 123 - .../julienschmidt/httprouter/router.go | 452 --- .../julienschmidt/httprouter/tree.go | 666 ---- .../github.com/kballard/go-shellquote/LICENSE | 19 - .../github.com/kballard/go-shellquote/README | 36 - .../github.com/kballard/go-shellquote/doc.go | 3 - .../kballard/go-shellquote/quote.go | 102 - .../kballard/go-shellquote/unquote.go | 156 - vendor/github.com/kr/fs/LICENSE | 27 - vendor/github.com/kr/fs/Readme | 3 - vendor/github.com/kr/fs/filesystem.go | 36 - vendor/github.com/kr/fs/walk.go | 95 - vendor/github.com/lxc/lxd/AUTHORS | 5 - vendor/github.com/lxc/lxd/COPYING | 202 -- .../github.com/lxc/lxd/client/connection.go | 358 --- 
vendor/github.com/lxc/lxd/client/doc.go | 146 - vendor/github.com/lxc/lxd/client/events.go | 109 - .../github.com/lxc/lxd/client/interfaces.go | 686 ----- .../lxc/lxd/client/interfaces_legacy.go | 126 - vendor/github.com/lxc/lxd/client/lxd.go | 493 --- .../lxc/lxd/client/lxd_certificates.go | 106 - .../github.com/lxc/lxd/client/lxd_cluster.go | 311 -- .../lxc/lxd/client/lxd_containers.go | 1799 ----------- .../github.com/lxc/lxd/client/lxd_events.go | 197 -- .../github.com/lxc/lxd/client/lxd_images.go | 1005 ------- .../lxc/lxd/client/lxd_instances.go | 2677 ----------------- .../lxc/lxd/client/lxd_network_acls.go | 159 - .../lxc/lxd/client/lxd_network_forwards.go | 105 - .../lxd/client/lxd_network_load_balancers.go | 113 - .../lxc/lxd/client/lxd_network_peer.go | 106 - .../lxc/lxd/client/lxd_network_zones.go | 202 -- .../github.com/lxc/lxd/client/lxd_networks.go | 154 - .../lxc/lxd/client/lxd_operations.go | 103 - .../github.com/lxc/lxd/client/lxd_profiles.go | 94 - .../github.com/lxc/lxd/client/lxd_projects.go | 139 - .../github.com/lxc/lxd/client/lxd_server.go | 193 -- .../lxc/lxd/client/lxd_storage_buckets.go | 235 -- .../lxc/lxd/client/lxd_storage_pools.go | 128 - .../lxc/lxd/client/lxd_storage_volumes.go | 983 ------ .../github.com/lxc/lxd/client/lxd_warnings.go | 90 - .../github.com/lxc/lxd/client/operations.go | 340 --- .../lxc/lxd/client/simplestreams.go | 52 - .../lxc/lxd/client/simplestreams_images.go | 302 -- vendor/github.com/lxc/lxd/client/util.go | 243 -- .../lxc/lxd/lxd/device/config/consts.go | 4 - .../lxd/device/config/device_proxyaddress.go | 9 - .../lxd/lxd/device/config/device_runconfig.go | 70 - .../lxc/lxd/lxd/device/config/devices.go | 203 -- .../lxc/lxd/lxd/device/config/devices_sort.go | 71 - .../lxd/lxd/device/config/devices_utils.go | 37 - .../instance/instancetype/instance_type.go | 62 - .../instance/instancetype/instance_vmagent.go | 22 - .../github.com/lxc/lxd/lxd/revert/revert.go | 47 - .../lxc/lxd/shared/api/certificate.go | 128 - 
.../github.com/lxc/lxd/shared/api/cluster.go | 312 -- .../lxc/lxd/shared/api/cluster_state.go | 28 - .../lxc/lxd/shared/api/container.go | 141 - .../lxc/lxd/shared/api/container_backup.go | 31 - .../lxc/lxd/shared/api/container_console.go | 17 - .../lxc/lxd/shared/api/container_exec.go | 26 - .../lxc/lxd/shared/api/container_snapshot.go | 53 - .../lxc/lxd/shared/api/container_state.go | 70 - .../github.com/lxc/lxd/shared/api/devlxd.go | 25 - vendor/github.com/lxc/lxd/shared/api/doc.go | 13 - vendor/github.com/lxc/lxd/shared/api/error.go | 72 - vendor/github.com/lxc/lxd/shared/api/event.go | 164 - .../lxc/lxd/shared/api/event_lifecycle.go | 122 - vendor/github.com/lxc/lxd/shared/api/image.go | 323 -- vendor/github.com/lxc/lxd/shared/api/init.go | 66 - .../github.com/lxc/lxd/shared/api/instance.go | 348 --- .../lxc/lxd/shared/api/instance_backup.go | 80 - .../lxc/lxd/shared/api/instance_console.go | 30 - .../lxc/lxd/shared/api/instance_exec.go | 56 - .../lxc/lxd/shared/api/instance_snapshot.go | 126 - .../lxc/lxd/shared/api/instance_state.go | 201 -- .../lxc/lxd/shared/api/migration.go | 10 - .../github.com/lxc/lxd/shared/api/network.go | 318 -- .../lxc/lxd/shared/api/network_acl.go | 157 - .../lxc/lxd/shared/api/network_forward.go | 143 - .../lxd/shared/api/network_load_balancer.go | 159 - .../lxc/lxd/shared/api/network_peer.go | 81 - .../lxc/lxd/shared/api/network_zone.go | 120 - .../lxc/lxd/shared/api/operation.go | 178 -- .../github.com/lxc/lxd/shared/api/profile.go | 67 - .../github.com/lxc/lxd/shared/api/project.go | 98 - .../github.com/lxc/lxd/shared/api/resource.go | 1070 ------- .../github.com/lxc/lxd/shared/api/response.go | 90 - .../github.com/lxc/lxd/shared/api/server.go | 206 -- .../lxc/lxd/shared/api/status_code.go | 69 - .../lxc/lxd/shared/api/storage_pool.go | 100 - .../lxc/lxd/shared/api/storage_pool_bucket.go | 148 - .../lxc/lxd/shared/api/storage_pool_volume.go | 239 -- .../shared/api/storage_pool_volume_backup.go | 70 - 
.../api/storage_pool_volume_snapshot.go | 83 - .../shared/api/storage_pool_volume_state.go | 28 - vendor/github.com/lxc/lxd/shared/api/url.go | 76 - .../github.com/lxc/lxd/shared/api/warning.go | 65 - vendor/github.com/lxc/lxd/shared/archive.go | 60 - .../lxc/lxd/shared/cancel/canceller.go | 21 - .../github.com/lxc/lxd/shared/cancel/http.go | 81 - vendor/github.com/lxc/lxd/shared/cert.go | 574 ---- vendor/github.com/lxc/lxd/shared/cgo.go | 12 - vendor/github.com/lxc/lxd/shared/instance.go | 397 --- .../lxc/lxd/shared/ioprogress/data.go | 16 - .../lxc/lxd/shared/ioprogress/reader.go | 25 - .../lxc/lxd/shared/ioprogress/tracker.go | 77 - .../lxc/lxd/shared/ioprogress/writer.go | 25 - vendor/github.com/lxc/lxd/shared/json.go | 51 - .../lxc/lxd/shared/logger/format.go | 25 - .../github.com/lxc/lxd/shared/logger/log.go | 72 - .../lxc/lxd/shared/logger/syslog_linux.go | 38 - .../lxc/lxd/shared/logger/syslog_other.go | 13 - .../lxc/lxd/shared/logger/toplevel.go | 70 - .../github.com/lxc/lxd/shared/logger/types.go | 35 - .../lxc/lxd/shared/logger/wrapper.go | 55 - vendor/github.com/lxc/lxd/shared/network.go | 517 ---- .../github.com/lxc/lxd/shared/network_ip.go | 32 - .../github.com/lxc/lxd/shared/network_unix.go | 26 - .../lxc/lxd/shared/network_windows.go | 66 - .../lxc/lxd/shared/osarch/architectures.go | 162 - .../lxd/shared/osarch/architectures_linux.go | 20 - .../lxd/shared/osarch/architectures_others.go | 7 - .../lxc/lxd/shared/osarch/release.go | 49 - vendor/github.com/lxc/lxd/shared/proxy.go | 184 -- .../lxc/lxd/shared/simplestreams/index.go | 17 - .../lxc/lxd/shared/simplestreams/products.go | 290 -- .../lxd/shared/simplestreams/simplestreams.go | 506 ---- .../lxc/lxd/shared/simplestreams/sort.go | 125 - .../lxc/lxd/shared/tcp/tcp_timeout_user.go | 28 - .../lxd/shared/tcp/tcp_timeout_user_noop.go | 14 - .../lxc/lxd/shared/tcp/tcp_timeouts.go | 70 - .../lxc/lxd/shared/termios/termios.go | 1 - .../lxc/lxd/shared/termios/termios_linux.go | 80 - 
.../lxc/lxd/shared/termios/termios_other.go | 49 - .../github.com/lxc/lxd/shared/units/units.go | 194 -- vendor/github.com/lxc/lxd/shared/util.go | 1358 --------- .../github.com/lxc/lxd/shared/util_linux.go | 607 ---- vendor/github.com/lxc/lxd/shared/util_unix.go | 15 - .../github.com/lxc/lxd/shared/util_windows.go | 11 - .../lxc/lxd/shared/validate/validate.go | 872 ------ vendor/github.com/pborman/uuid/.travis.yml | 10 - .../github.com/pborman/uuid/CONTRIBUTING.md | 10 - vendor/github.com/pborman/uuid/CONTRIBUTORS | 1 - vendor/github.com/pborman/uuid/LICENSE | 27 - vendor/github.com/pborman/uuid/README.md | 15 - vendor/github.com/pborman/uuid/dce.go | 84 - vendor/github.com/pborman/uuid/doc.go | 13 - vendor/github.com/pborman/uuid/hash.go | 53 - vendor/github.com/pborman/uuid/marshal.go | 85 - vendor/github.com/pborman/uuid/node.go | 50 - vendor/github.com/pborman/uuid/sql.go | 68 - vendor/github.com/pborman/uuid/time.go | 57 - vendor/github.com/pborman/uuid/util.go | 32 - vendor/github.com/pborman/uuid/uuid.go | 162 - vendor/github.com/pborman/uuid/version1.go | 23 - vendor/github.com/pborman/uuid/version4.go | 26 - vendor/github.com/pkg/sftp/.gitignore | 10 - vendor/github.com/pkg/sftp/CONTRIBUTORS | 3 - vendor/github.com/pkg/sftp/LICENSE | 9 - vendor/github.com/pkg/sftp/Makefile | 27 - vendor/github.com/pkg/sftp/README.md | 44 - vendor/github.com/pkg/sftp/allocator.go | 96 - vendor/github.com/pkg/sftp/attrs.go | 90 - vendor/github.com/pkg/sftp/attrs_stubs.go | 11 - vendor/github.com/pkg/sftp/attrs_unix.go | 16 - vendor/github.com/pkg/sftp/client.go | 1977 ------------ vendor/github.com/pkg/sftp/conn.go | 189 -- vendor/github.com/pkg/sftp/debug.go | 9 - vendor/github.com/pkg/sftp/fuzz.go | 22 - .../internal/encoding/ssh/filexfer/attrs.go | 325 -- .../internal/encoding/ssh/filexfer/buffer.go | 293 -- .../encoding/ssh/filexfer/extended_packets.go | 142 - .../encoding/ssh/filexfer/extensions.go | 46 - .../encoding/ssh/filexfer/filexfer.go | 54 - 
.../sftp/internal/encoding/ssh/filexfer/fx.go | 147 - .../internal/encoding/ssh/filexfer/fxp.go | 124 - .../encoding/ssh/filexfer/handle_packets.go | 249 -- .../encoding/ssh/filexfer/init_packets.go | 99 - .../encoding/ssh/filexfer/open_packets.go | 89 - .../internal/encoding/ssh/filexfer/packets.go | 323 -- .../encoding/ssh/filexfer/path_packets.go | 368 --- .../encoding/ssh/filexfer/permissions.go | 114 - .../encoding/ssh/filexfer/response_packets.go | 243 -- vendor/github.com/pkg/sftp/ls_formatting.go | 81 - vendor/github.com/pkg/sftp/ls_plan9.go | 21 - vendor/github.com/pkg/sftp/ls_stub.go | 11 - vendor/github.com/pkg/sftp/ls_unix.go | 23 - vendor/github.com/pkg/sftp/match.go | 137 - vendor/github.com/pkg/sftp/packet-manager.go | 216 -- vendor/github.com/pkg/sftp/packet-typing.go | 135 - vendor/github.com/pkg/sftp/packet.go | 1276 -------- vendor/github.com/pkg/sftp/pool.go | 79 - vendor/github.com/pkg/sftp/release.go | 5 - vendor/github.com/pkg/sftp/request-attrs.go | 63 - vendor/github.com/pkg/sftp/request-errors.go | 54 - vendor/github.com/pkg/sftp/request-example.go | 666 ---- .../github.com/pkg/sftp/request-interfaces.go | 123 - vendor/github.com/pkg/sftp/request-plan9.go | 34 - vendor/github.com/pkg/sftp/request-readme.md | 53 - vendor/github.com/pkg/sftp/request-server.go | 328 -- vendor/github.com/pkg/sftp/request-unix.go | 27 - vendor/github.com/pkg/sftp/request.go | 630 ---- vendor/github.com/pkg/sftp/request_windows.go | 44 - vendor/github.com/pkg/sftp/server.go | 616 ---- .../pkg/sftp/server_statvfs_darwin.go | 21 - .../pkg/sftp/server_statvfs_impl.go | 29 - .../pkg/sftp/server_statvfs_linux.go | 22 - .../pkg/sftp/server_statvfs_plan9.go | 13 - .../pkg/sftp/server_statvfs_stubs.go | 15 - vendor/github.com/pkg/sftp/sftp.go | 258 -- vendor/github.com/pkg/sftp/stat_plan9.go | 103 - vendor/github.com/pkg/sftp/stat_posix.go | 124 - vendor/github.com/pkg/sftp/syscall_fixed.go | 9 - vendor/github.com/pkg/sftp/syscall_good.go | 8 - 
vendor/github.com/pkg/xattr/.gitignore | 26 - vendor/github.com/pkg/xattr/LICENSE | 25 - vendor/github.com/pkg/xattr/README.md | 45 - vendor/github.com/pkg/xattr/xattr.go | 257 -- vendor/github.com/pkg/xattr/xattr_bsd.go | 201 -- vendor/github.com/pkg/xattr/xattr_darwin.go | 90 - vendor/github.com/pkg/xattr/xattr_linux.go | 142 - vendor/github.com/pkg/xattr/xattr_solaris.go | 165 - .../github.com/pkg/xattr/xattr_unsupported.go | 70 - vendor/github.com/robfig/cron/v3/.gitignore | 22 - vendor/github.com/robfig/cron/v3/.travis.yml | 1 - vendor/github.com/robfig/cron/v3/LICENSE | 21 - vendor/github.com/robfig/cron/v3/README.md | 125 - vendor/github.com/robfig/cron/v3/chain.go | 92 - .../robfig/cron/v3/constantdelay.go | 27 - vendor/github.com/robfig/cron/v3/cron.go | 355 --- vendor/github.com/robfig/cron/v3/doc.go | 231 -- vendor/github.com/robfig/cron/v3/logger.go | 86 - vendor/github.com/robfig/cron/v3/option.go | 45 - vendor/github.com/robfig/cron/v3/parser.go | 434 --- vendor/github.com/robfig/cron/v3/spec.go | 188 -- vendor/github.com/rogpeppe/fastuuid/LICENSE | 26 - vendor/github.com/rogpeppe/fastuuid/README.md | 95 - vendor/github.com/rogpeppe/fastuuid/uuid.go | 146 - vendor/github.com/sirupsen/logrus/.gitignore | 4 - .../github.com/sirupsen/logrus/.golangci.yml | 40 - vendor/github.com/sirupsen/logrus/.travis.yml | 15 - .../github.com/sirupsen/logrus/CHANGELOG.md | 259 -- vendor/github.com/sirupsen/logrus/LICENSE | 21 - vendor/github.com/sirupsen/logrus/README.md | 513 ---- vendor/github.com/sirupsen/logrus/alt_exit.go | 76 - .../github.com/sirupsen/logrus/appveyor.yml | 14 - .../github.com/sirupsen/logrus/buffer_pool.go | 43 - vendor/github.com/sirupsen/logrus/doc.go | 26 - vendor/github.com/sirupsen/logrus/entry.go | 442 --- vendor/github.com/sirupsen/logrus/exported.go | 270 -- .../github.com/sirupsen/logrus/formatter.go | 78 - vendor/github.com/sirupsen/logrus/hooks.go | 34 - .../sirupsen/logrus/hooks/syslog/README.md | 39 - 
.../sirupsen/logrus/hooks/syslog/syslog.go | 55 - .../sirupsen/logrus/hooks/writer/README.md | 43 - .../sirupsen/logrus/hooks/writer/writer.go | 29 - .../sirupsen/logrus/json_formatter.go | 128 - vendor/github.com/sirupsen/logrus/logger.go | 417 --- vendor/github.com/sirupsen/logrus/logrus.go | 186 -- .../logrus/terminal_check_appengine.go | 11 - .../sirupsen/logrus/terminal_check_bsd.go | 13 - .../sirupsen/logrus/terminal_check_js.go | 7 - .../logrus/terminal_check_no_terminal.go | 11 - .../logrus/terminal_check_notappengine.go | 17 - .../sirupsen/logrus/terminal_check_solaris.go | 11 - .../sirupsen/logrus/terminal_check_unix.go | 13 - .../sirupsen/logrus/terminal_check_windows.go | 27 - .../sirupsen/logrus/text_formatter.go | 339 --- vendor/github.com/sirupsen/logrus/writer.go | 70 - vendor/golang.org/x/crypto/blake2b/blake2b.go | 291 -- .../x/crypto/blake2b/blake2bAVX2_amd64.go | 38 - .../x/crypto/blake2b/blake2bAVX2_amd64.s | 745 ----- .../x/crypto/blake2b/blake2b_amd64.go | 25 - .../x/crypto/blake2b/blake2b_amd64.s | 279 -- .../x/crypto/blake2b/blake2b_generic.go | 182 -- .../x/crypto/blake2b/blake2b_ref.go | 12 - vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 -- .../golang.org/x/crypto/blake2b/register.go | 33 - .../x/crypto/curve25519/curve25519.go | 59 - .../x/crypto/curve25519/curve25519_compat.go | 105 - .../x/crypto/curve25519/curve25519_go120.go | 46 - .../x/crypto/curve25519/internal/field/README | 7 - .../x/crypto/curve25519/internal/field/fe.go | 416 --- .../curve25519/internal/field/fe_amd64.go | 16 - .../curve25519/internal/field/fe_amd64.s | 379 --- .../internal/field/fe_amd64_noasm.go | 12 - .../curve25519/internal/field/fe_arm64.go | 16 - .../curve25519/internal/field/fe_arm64.s | 43 - .../internal/field/fe_arm64_noasm.go | 12 - .../curve25519/internal/field/fe_generic.go | 264 -- .../curve25519/internal/field/sync.checkpoint | 1 - .../crypto/curve25519/internal/field/sync.sh | 19 - vendor/golang.org/x/crypto/ed25519/ed25519.go | 71 - 
vendor/golang.org/x/crypto/nacl/box/box.go | 182 -- .../x/crypto/nacl/secretbox/secretbox.go | 173 -- .../x/crypto/salsa20/salsa/hsalsa20.go | 146 - .../x/crypto/salsa20/salsa/salsa208.go | 201 -- .../x/crypto/salsa20/salsa/salsa20_amd64.go | 24 - .../x/crypto/salsa20/salsa/salsa20_amd64.s | 881 ------ .../x/crypto/salsa20/salsa/salsa20_noasm.go | 15 - .../x/crypto/salsa20/salsa/salsa20_ref.go | 233 -- vendor/golang.org/x/crypto/ssh/buffer.go | 97 - vendor/golang.org/x/crypto/ssh/certs.go | 589 ---- vendor/golang.org/x/crypto/ssh/channel.go | 633 ---- vendor/golang.org/x/crypto/ssh/cipher.go | 789 ----- vendor/golang.org/x/crypto/ssh/client.go | 282 -- vendor/golang.org/x/crypto/ssh/client_auth.go | 725 ----- vendor/golang.org/x/crypto/ssh/common.go | 471 --- vendor/golang.org/x/crypto/ssh/connection.go | 143 - vendor/golang.org/x/crypto/ssh/doc.go | 22 - vendor/golang.org/x/crypto/ssh/handshake.go | 735 ----- .../ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go | 93 - vendor/golang.org/x/crypto/ssh/kex.go | 786 ----- vendor/golang.org/x/crypto/ssh/keys.go | 1447 --------- vendor/golang.org/x/crypto/ssh/mac.go | 68 - vendor/golang.org/x/crypto/ssh/messages.go | 877 ------ vendor/golang.org/x/crypto/ssh/mux.go | 351 --- vendor/golang.org/x/crypto/ssh/server.go | 774 ----- vendor/golang.org/x/crypto/ssh/session.go | 647 ---- vendor/golang.org/x/crypto/ssh/ssh_gss.go | 139 - vendor/golang.org/x/crypto/ssh/streamlocal.go | 116 - vendor/golang.org/x/crypto/ssh/tcpip.go | 474 --- .../x/crypto/ssh/terminal/terminal.go | 76 - vendor/golang.org/x/crypto/ssh/transport.go | 358 --- vendor/golang.org/x/net/html/atom/atom.go | 78 - vendor/golang.org/x/net/html/atom/table.go | 783 ----- vendor/golang.org/x/net/html/const.go | 111 - vendor/golang.org/x/net/html/doc.go | 127 - vendor/golang.org/x/net/html/doctype.go | 156 - vendor/golang.org/x/net/html/entity.go | 2253 -------------- vendor/golang.org/x/net/html/escape.go | 339 --- vendor/golang.org/x/net/html/foreign.go | 222 -- 
vendor/golang.org/x/net/html/node.go | 225 -- vendor/golang.org/x/net/html/parse.go | 2460 --------------- vendor/golang.org/x/net/html/render.go | 293 -- vendor/golang.org/x/net/html/token.go | 1268 -------- .../x/net/publicsuffix/data/children | Bin 2876 -> 0 bytes .../golang.org/x/net/publicsuffix/data/nodes | Bin 48280 -> 0 bytes .../golang.org/x/net/publicsuffix/data/text | 1 - vendor/golang.org/x/net/publicsuffix/list.go | 203 -- vendor/golang.org/x/net/publicsuffix/table.go | 70 - vendor/golang.org/x/sys/plan9/asm.s | 8 - vendor/golang.org/x/sys/plan9/asm_plan9_386.s | 30 - .../golang.org/x/sys/plan9/asm_plan9_amd64.s | 30 - vendor/golang.org/x/sys/plan9/asm_plan9_arm.s | 25 - vendor/golang.org/x/sys/plan9/const_plan9.go | 70 - vendor/golang.org/x/sys/plan9/dir_plan9.go | 212 -- vendor/golang.org/x/sys/plan9/env_plan9.go | 31 - vendor/golang.org/x/sys/plan9/errors_plan9.go | 50 - vendor/golang.org/x/sys/plan9/mkall.sh | 150 - vendor/golang.org/x/sys/plan9/mkerrors.sh | 246 -- .../golang.org/x/sys/plan9/mksysnum_plan9.sh | 23 - .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 22 - vendor/golang.org/x/sys/plan9/pwd_plan9.go | 24 - vendor/golang.org/x/sys/plan9/race.go | 31 - vendor/golang.org/x/sys/plan9/race0.go | 26 - vendor/golang.org/x/sys/plan9/str.go | 23 - vendor/golang.org/x/sys/plan9/syscall.go | 110 - .../golang.org/x/sys/plan9/syscall_plan9.go | 361 --- .../x/sys/plan9/zsyscall_plan9_386.go | 285 -- .../x/sys/plan9/zsyscall_plan9_amd64.go | 285 -- .../x/sys/plan9/zsyscall_plan9_arm.go | 285 -- .../golang.org/x/sys/plan9/zsysnum_plan9.go | 49 - vendor/golang.org/x/term/CONTRIBUTING.md | 26 - vendor/golang.org/x/term/LICENSE | 27 - vendor/golang.org/x/term/PATENTS | 22 - vendor/golang.org/x/term/README.md | 19 - vendor/golang.org/x/term/codereview.cfg | 1 - vendor/golang.org/x/term/term.go | 60 - vendor/golang.org/x/term/term_plan9.go | 42 - vendor/golang.org/x/term/term_unix.go | 92 - vendor/golang.org/x/term/term_unix_bsd.go | 13 - 
vendor/golang.org/x/term/term_unix_other.go | 13 - vendor/golang.org/x/term/term_unsupported.go | 39 - vendor/golang.org/x/term/term_windows.go | 79 - vendor/golang.org/x/term/terminal.go | 986 ------ vendor/gopkg.in/errgo.v1/LICENSE | 26 - vendor/gopkg.in/errgo.v1/README.md | 259 -- vendor/gopkg.in/errgo.v1/errors.go | 389 --- vendor/gopkg.in/httprequest.v1/.travis.yml | 5 - vendor/gopkg.in/httprequest.v1/LICENSE | 185 -- vendor/gopkg.in/httprequest.v1/README.md | 690 ----- vendor/gopkg.in/httprequest.v1/client.go | 308 -- vendor/gopkg.in/httprequest.v1/error.go | 121 - vendor/gopkg.in/httprequest.v1/fancyerror.go | 277 -- vendor/gopkg.in/httprequest.v1/handler.go | 669 ---- vendor/gopkg.in/httprequest.v1/marshal.go | 435 --- vendor/gopkg.in/httprequest.v1/type.go | 412 --- vendor/gopkg.in/httprequest.v1/unmarshal.go | 262 -- vendor/gopkg.in/macaroon.v2/.gitignore | 1 - vendor/gopkg.in/macaroon.v2/.travis.yml | 11 - vendor/gopkg.in/macaroon.v2/LICENSE | 26 - vendor/gopkg.in/macaroon.v2/README.md | 355 --- vendor/gopkg.in/macaroon.v2/TODO | 4 - vendor/gopkg.in/macaroon.v2/crypto.go | 91 - vendor/gopkg.in/macaroon.v2/dependencies.tsv | 5 - vendor/gopkg.in/macaroon.v2/macaroon.go | 399 --- vendor/gopkg.in/macaroon.v2/marshal-v1.go | 190 -- vendor/gopkg.in/macaroon.v2/marshal-v2.go | 253 -- vendor/gopkg.in/macaroon.v2/marshal.go | 239 -- vendor/gopkg.in/macaroon.v2/packet-v1.go | 133 - vendor/gopkg.in/macaroon.v2/packet-v2.go | 117 - vendor/gopkg.in/macaroon.v2/trace.go | 102 - vendor/modules.txt | 93 +- 515 files changed, 45 insertions(+), 95062 deletions(-) delete mode 100644 config/lxd.go delete mode 100644 config/lxd_test.go delete mode 100644 runner/providers/lxd/images.go delete mode 100644 runner/providers/lxd/lxd.go delete mode 100644 runner/providers/lxd/specs.go delete mode 100644 runner/providers/lxd/util.go create mode 100644 test/integration/config/garm-provider-lxd.toml delete mode 100644 
vendor/github.com/cloudbase/garm-provider-common/cloudconfig/cloudconfig.go delete mode 100644 vendor/github.com/cloudbase/garm-provider-common/cloudconfig/templates.go delete mode 100644 vendor/github.com/cloudbase/garm-provider-common/cloudconfig/util.go delete mode 100644 vendor/github.com/flosch/pongo2/.gitattributes delete mode 100644 vendor/github.com/flosch/pongo2/.gitignore delete mode 100644 vendor/github.com/flosch/pongo2/.travis.yml delete mode 100644 vendor/github.com/flosch/pongo2/AUTHORS delete mode 100644 vendor/github.com/flosch/pongo2/LICENSE delete mode 100644 vendor/github.com/flosch/pongo2/README.md delete mode 100644 vendor/github.com/flosch/pongo2/context.go delete mode 100644 vendor/github.com/flosch/pongo2/doc.go delete mode 100644 vendor/github.com/flosch/pongo2/error.go delete mode 100644 vendor/github.com/flosch/pongo2/filters.go delete mode 100644 vendor/github.com/flosch/pongo2/filters_builtin.go delete mode 100644 vendor/github.com/flosch/pongo2/helpers.go delete mode 100644 vendor/github.com/flosch/pongo2/lexer.go delete mode 100644 vendor/github.com/flosch/pongo2/nodes.go delete mode 100644 vendor/github.com/flosch/pongo2/nodes_html.go delete mode 100644 vendor/github.com/flosch/pongo2/nodes_wrapper.go delete mode 100644 vendor/github.com/flosch/pongo2/options.go delete mode 100644 vendor/github.com/flosch/pongo2/parser.go delete mode 100644 vendor/github.com/flosch/pongo2/parser_document.go delete mode 100644 vendor/github.com/flosch/pongo2/parser_expression.go delete mode 100644 vendor/github.com/flosch/pongo2/pongo2.go delete mode 100644 vendor/github.com/flosch/pongo2/tags.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_autoescape.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_block.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_comment.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_cycle.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_extends.go delete mode 100644 
vendor/github.com/flosch/pongo2/tags_filter.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_firstof.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_for.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_if.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_ifchanged.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_ifequal.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_ifnotequal.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_import.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_include.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_lorem.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_macro.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_now.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_set.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_spaceless.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_ssi.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_templatetag.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_widthratio.go delete mode 100644 vendor/github.com/flosch/pongo2/tags_with.go delete mode 100644 vendor/github.com/flosch/pongo2/template.go delete mode 100644 vendor/github.com/flosch/pongo2/template_loader.go delete mode 100644 vendor/github.com/flosch/pongo2/template_sets.go delete mode 100644 vendor/github.com/flosch/pongo2/value.go delete mode 100644 vendor/github.com/flosch/pongo2/variable.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/LICENSE delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/bakery.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checker.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/checkers.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/declared.go delete mode 100644 
vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/namespace.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/time.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/codec.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/discharge.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/dischargeall.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/doc.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/error.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/keys.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/logger.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/macaroon.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/oven.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/slice.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/store.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/version.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/browser.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/checkers.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/client.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_go17.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_prego17.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/discharge.go delete mode 100644 
vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/dischargeclient_generated.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/error.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/keyring.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/oven.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/request.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/visitor.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil/relativeurl.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroonpb/README.md delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroonpb/id.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go delete mode 100644 vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto delete mode 100644 vendor/github.com/juju/webbrowser/.gitignore delete mode 100644 vendor/github.com/juju/webbrowser/LICENSE delete mode 100644 vendor/github.com/juju/webbrowser/README.md delete mode 100644 vendor/github.com/juju/webbrowser/webbrowser.go delete mode 100644 vendor/github.com/julienschmidt/httprouter/.travis.yml delete mode 100644 vendor/github.com/julienschmidt/httprouter/LICENSE delete mode 100644 vendor/github.com/julienschmidt/httprouter/README.md delete mode 100644 vendor/github.com/julienschmidt/httprouter/path.go delete mode 100644 vendor/github.com/julienschmidt/httprouter/router.go delete mode 100644 vendor/github.com/julienschmidt/httprouter/tree.go delete mode 100644 vendor/github.com/kballard/go-shellquote/LICENSE delete mode 100644 vendor/github.com/kballard/go-shellquote/README delete mode 100644 vendor/github.com/kballard/go-shellquote/doc.go delete mode 100644 
vendor/github.com/kballard/go-shellquote/quote.go delete mode 100644 vendor/github.com/kballard/go-shellquote/unquote.go delete mode 100644 vendor/github.com/kr/fs/LICENSE delete mode 100644 vendor/github.com/kr/fs/Readme delete mode 100644 vendor/github.com/kr/fs/filesystem.go delete mode 100644 vendor/github.com/kr/fs/walk.go delete mode 100644 vendor/github.com/lxc/lxd/AUTHORS delete mode 100644 vendor/github.com/lxc/lxd/COPYING delete mode 100644 vendor/github.com/lxc/lxd/client/connection.go delete mode 100644 vendor/github.com/lxc/lxd/client/doc.go delete mode 100644 vendor/github.com/lxc/lxd/client/events.go delete mode 100644 vendor/github.com/lxc/lxd/client/interfaces.go delete mode 100644 vendor/github.com/lxc/lxd/client/interfaces_legacy.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_certificates.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_cluster.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_containers.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_events.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_images.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_instances.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_network_acls.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_network_forwards.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_network_load_balancers.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_network_peer.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_network_zones.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_networks.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_operations.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_profiles.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_projects.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_server.go delete mode 100644 
vendor/github.com/lxc/lxd/client/lxd_storage_buckets.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_storage_pools.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_storage_volumes.go delete mode 100644 vendor/github.com/lxc/lxd/client/lxd_warnings.go delete mode 100644 vendor/github.com/lxc/lxd/client/operations.go delete mode 100644 vendor/github.com/lxc/lxd/client/simplestreams.go delete mode 100644 vendor/github.com/lxc/lxd/client/simplestreams_images.go delete mode 100644 vendor/github.com/lxc/lxd/client/util.go delete mode 100644 vendor/github.com/lxc/lxd/lxd/device/config/consts.go delete mode 100644 vendor/github.com/lxc/lxd/lxd/device/config/device_proxyaddress.go delete mode 100644 vendor/github.com/lxc/lxd/lxd/device/config/device_runconfig.go delete mode 100644 vendor/github.com/lxc/lxd/lxd/device/config/devices.go delete mode 100644 vendor/github.com/lxc/lxd/lxd/device/config/devices_sort.go delete mode 100644 vendor/github.com/lxc/lxd/lxd/device/config/devices_utils.go delete mode 100644 vendor/github.com/lxc/lxd/lxd/instance/instancetype/instance_type.go delete mode 100644 vendor/github.com/lxc/lxd/lxd/instance/instancetype/instance_vmagent.go delete mode 100644 vendor/github.com/lxc/lxd/lxd/revert/revert.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/certificate.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/cluster.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/cluster_state.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/container.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/container_backup.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/container_console.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/container_exec.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/container_snapshot.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/container_state.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/devlxd.go delete 
mode 100644 vendor/github.com/lxc/lxd/shared/api/doc.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/error.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/event.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/event_lifecycle.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/image.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/init.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/instance.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/instance_backup.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/instance_console.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/instance_exec.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/instance_snapshot.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/instance_state.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/migration.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/network.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/network_acl.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/network_forward.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/network_load_balancer.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/network_peer.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/network_zone.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/operation.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/profile.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/project.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/resource.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/response.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/server.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/status_code.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/storage_pool.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/storage_pool_bucket.go delete mode 100644 
vendor/github.com/lxc/lxd/shared/api/storage_pool_volume.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_backup.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_snapshot.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_state.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/url.go delete mode 100644 vendor/github.com/lxc/lxd/shared/api/warning.go delete mode 100644 vendor/github.com/lxc/lxd/shared/archive.go delete mode 100644 vendor/github.com/lxc/lxd/shared/cancel/canceller.go delete mode 100644 vendor/github.com/lxc/lxd/shared/cancel/http.go delete mode 100644 vendor/github.com/lxc/lxd/shared/cert.go delete mode 100644 vendor/github.com/lxc/lxd/shared/cgo.go delete mode 100644 vendor/github.com/lxc/lxd/shared/instance.go delete mode 100644 vendor/github.com/lxc/lxd/shared/ioprogress/data.go delete mode 100644 vendor/github.com/lxc/lxd/shared/ioprogress/reader.go delete mode 100644 vendor/github.com/lxc/lxd/shared/ioprogress/tracker.go delete mode 100644 vendor/github.com/lxc/lxd/shared/ioprogress/writer.go delete mode 100644 vendor/github.com/lxc/lxd/shared/json.go delete mode 100644 vendor/github.com/lxc/lxd/shared/logger/format.go delete mode 100644 vendor/github.com/lxc/lxd/shared/logger/log.go delete mode 100644 vendor/github.com/lxc/lxd/shared/logger/syslog_linux.go delete mode 100644 vendor/github.com/lxc/lxd/shared/logger/syslog_other.go delete mode 100644 vendor/github.com/lxc/lxd/shared/logger/toplevel.go delete mode 100644 vendor/github.com/lxc/lxd/shared/logger/types.go delete mode 100644 vendor/github.com/lxc/lxd/shared/logger/wrapper.go delete mode 100644 vendor/github.com/lxc/lxd/shared/network.go delete mode 100644 vendor/github.com/lxc/lxd/shared/network_ip.go delete mode 100644 vendor/github.com/lxc/lxd/shared/network_unix.go delete mode 100644 vendor/github.com/lxc/lxd/shared/network_windows.go delete mode 100644 
vendor/github.com/lxc/lxd/shared/osarch/architectures.go delete mode 100644 vendor/github.com/lxc/lxd/shared/osarch/architectures_linux.go delete mode 100644 vendor/github.com/lxc/lxd/shared/osarch/architectures_others.go delete mode 100644 vendor/github.com/lxc/lxd/shared/osarch/release.go delete mode 100644 vendor/github.com/lxc/lxd/shared/proxy.go delete mode 100644 vendor/github.com/lxc/lxd/shared/simplestreams/index.go delete mode 100644 vendor/github.com/lxc/lxd/shared/simplestreams/products.go delete mode 100644 vendor/github.com/lxc/lxd/shared/simplestreams/simplestreams.go delete mode 100644 vendor/github.com/lxc/lxd/shared/simplestreams/sort.go delete mode 100644 vendor/github.com/lxc/lxd/shared/tcp/tcp_timeout_user.go delete mode 100644 vendor/github.com/lxc/lxd/shared/tcp/tcp_timeout_user_noop.go delete mode 100644 vendor/github.com/lxc/lxd/shared/tcp/tcp_timeouts.go delete mode 100644 vendor/github.com/lxc/lxd/shared/termios/termios.go delete mode 100644 vendor/github.com/lxc/lxd/shared/termios/termios_linux.go delete mode 100644 vendor/github.com/lxc/lxd/shared/termios/termios_other.go delete mode 100644 vendor/github.com/lxc/lxd/shared/units/units.go delete mode 100644 vendor/github.com/lxc/lxd/shared/util.go delete mode 100644 vendor/github.com/lxc/lxd/shared/util_linux.go delete mode 100644 vendor/github.com/lxc/lxd/shared/util_unix.go delete mode 100644 vendor/github.com/lxc/lxd/shared/util_windows.go delete mode 100644 vendor/github.com/lxc/lxd/shared/validate/validate.go delete mode 100644 vendor/github.com/pborman/uuid/.travis.yml delete mode 100644 vendor/github.com/pborman/uuid/CONTRIBUTING.md delete mode 100644 vendor/github.com/pborman/uuid/CONTRIBUTORS delete mode 100644 vendor/github.com/pborman/uuid/LICENSE delete mode 100644 vendor/github.com/pborman/uuid/README.md delete mode 100644 vendor/github.com/pborman/uuid/dce.go delete mode 100644 vendor/github.com/pborman/uuid/doc.go delete mode 100644 vendor/github.com/pborman/uuid/hash.go 
delete mode 100644 vendor/github.com/pborman/uuid/marshal.go delete mode 100644 vendor/github.com/pborman/uuid/node.go delete mode 100644 vendor/github.com/pborman/uuid/sql.go delete mode 100644 vendor/github.com/pborman/uuid/time.go delete mode 100644 vendor/github.com/pborman/uuid/util.go delete mode 100644 vendor/github.com/pborman/uuid/uuid.go delete mode 100644 vendor/github.com/pborman/uuid/version1.go delete mode 100644 vendor/github.com/pborman/uuid/version4.go delete mode 100644 vendor/github.com/pkg/sftp/.gitignore delete mode 100644 vendor/github.com/pkg/sftp/CONTRIBUTORS delete mode 100644 vendor/github.com/pkg/sftp/LICENSE delete mode 100644 vendor/github.com/pkg/sftp/Makefile delete mode 100644 vendor/github.com/pkg/sftp/README.md delete mode 100644 vendor/github.com/pkg/sftp/allocator.go delete mode 100644 vendor/github.com/pkg/sftp/attrs.go delete mode 100644 vendor/github.com/pkg/sftp/attrs_stubs.go delete mode 100644 vendor/github.com/pkg/sftp/attrs_unix.go delete mode 100644 vendor/github.com/pkg/sftp/client.go delete mode 100644 vendor/github.com/pkg/sftp/conn.go delete mode 100644 vendor/github.com/pkg/sftp/debug.go delete mode 100644 vendor/github.com/pkg/sftp/fuzz.go delete mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go delete mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go delete mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go delete mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go delete mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go delete mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go delete mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go delete mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go delete mode 100644 
vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go delete mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go delete mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go delete mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go delete mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go delete mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go delete mode 100644 vendor/github.com/pkg/sftp/ls_formatting.go delete mode 100644 vendor/github.com/pkg/sftp/ls_plan9.go delete mode 100644 vendor/github.com/pkg/sftp/ls_stub.go delete mode 100644 vendor/github.com/pkg/sftp/ls_unix.go delete mode 100644 vendor/github.com/pkg/sftp/match.go delete mode 100644 vendor/github.com/pkg/sftp/packet-manager.go delete mode 100644 vendor/github.com/pkg/sftp/packet-typing.go delete mode 100644 vendor/github.com/pkg/sftp/packet.go delete mode 100644 vendor/github.com/pkg/sftp/pool.go delete mode 100644 vendor/github.com/pkg/sftp/release.go delete mode 100644 vendor/github.com/pkg/sftp/request-attrs.go delete mode 100644 vendor/github.com/pkg/sftp/request-errors.go delete mode 100644 vendor/github.com/pkg/sftp/request-example.go delete mode 100644 vendor/github.com/pkg/sftp/request-interfaces.go delete mode 100644 vendor/github.com/pkg/sftp/request-plan9.go delete mode 100644 vendor/github.com/pkg/sftp/request-readme.md delete mode 100644 vendor/github.com/pkg/sftp/request-server.go delete mode 100644 vendor/github.com/pkg/sftp/request-unix.go delete mode 100644 vendor/github.com/pkg/sftp/request.go delete mode 100644 vendor/github.com/pkg/sftp/request_windows.go delete mode 100644 vendor/github.com/pkg/sftp/server.go delete mode 100644 vendor/github.com/pkg/sftp/server_statvfs_darwin.go delete mode 100644 vendor/github.com/pkg/sftp/server_statvfs_impl.go delete mode 100644 
vendor/github.com/pkg/sftp/server_statvfs_linux.go delete mode 100644 vendor/github.com/pkg/sftp/server_statvfs_plan9.go delete mode 100644 vendor/github.com/pkg/sftp/server_statvfs_stubs.go delete mode 100644 vendor/github.com/pkg/sftp/sftp.go delete mode 100644 vendor/github.com/pkg/sftp/stat_plan9.go delete mode 100644 vendor/github.com/pkg/sftp/stat_posix.go delete mode 100644 vendor/github.com/pkg/sftp/syscall_fixed.go delete mode 100644 vendor/github.com/pkg/sftp/syscall_good.go delete mode 100644 vendor/github.com/pkg/xattr/.gitignore delete mode 100644 vendor/github.com/pkg/xattr/LICENSE delete mode 100644 vendor/github.com/pkg/xattr/README.md delete mode 100644 vendor/github.com/pkg/xattr/xattr.go delete mode 100644 vendor/github.com/pkg/xattr/xattr_bsd.go delete mode 100644 vendor/github.com/pkg/xattr/xattr_darwin.go delete mode 100644 vendor/github.com/pkg/xattr/xattr_linux.go delete mode 100644 vendor/github.com/pkg/xattr/xattr_solaris.go delete mode 100644 vendor/github.com/pkg/xattr/xattr_unsupported.go delete mode 100644 vendor/github.com/robfig/cron/v3/.gitignore delete mode 100644 vendor/github.com/robfig/cron/v3/.travis.yml delete mode 100644 vendor/github.com/robfig/cron/v3/LICENSE delete mode 100644 vendor/github.com/robfig/cron/v3/README.md delete mode 100644 vendor/github.com/robfig/cron/v3/chain.go delete mode 100644 vendor/github.com/robfig/cron/v3/constantdelay.go delete mode 100644 vendor/github.com/robfig/cron/v3/cron.go delete mode 100644 vendor/github.com/robfig/cron/v3/doc.go delete mode 100644 vendor/github.com/robfig/cron/v3/logger.go delete mode 100644 vendor/github.com/robfig/cron/v3/option.go delete mode 100644 vendor/github.com/robfig/cron/v3/parser.go delete mode 100644 vendor/github.com/robfig/cron/v3/spec.go delete mode 100644 vendor/github.com/rogpeppe/fastuuid/LICENSE delete mode 100644 vendor/github.com/rogpeppe/fastuuid/README.md delete mode 100644 vendor/github.com/rogpeppe/fastuuid/uuid.go delete mode 100644 
vendor/github.com/sirupsen/logrus/.gitignore delete mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml delete mode 100644 vendor/github.com/sirupsen/logrus/.travis.yml delete mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md delete mode 100644 vendor/github.com/sirupsen/logrus/LICENSE delete mode 100644 vendor/github.com/sirupsen/logrus/README.md delete mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go delete mode 100644 vendor/github.com/sirupsen/logrus/appveyor.yml delete mode 100644 vendor/github.com/sirupsen/logrus/buffer_pool.go delete mode 100644 vendor/github.com/sirupsen/logrus/doc.go delete mode 100644 vendor/github.com/sirupsen/logrus/entry.go delete mode 100644 vendor/github.com/sirupsen/logrus/exported.go delete mode 100644 vendor/github.com/sirupsen/logrus/formatter.go delete mode 100644 vendor/github.com/sirupsen/logrus/hooks.go delete mode 100644 vendor/github.com/sirupsen/logrus/hooks/syslog/README.md delete mode 100644 vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go delete mode 100644 vendor/github.com/sirupsen/logrus/hooks/writer/README.md delete mode 100644 vendor/github.com/sirupsen/logrus/hooks/writer/writer.go delete mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go delete mode 100644 vendor/github.com/sirupsen/logrus/logger.go delete mode 100644 vendor/github.com/sirupsen/logrus/logrus.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_appengine.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_bsd.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_solaris.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go delete mode 100644 
vendor/github.com/sirupsen/logrus/terminal_check_windows.go delete mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go delete mode 100644 vendor/github.com/sirupsen/logrus/writer.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/register.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_compat.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_go120.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/README delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh delete mode 100644 
vendor/golang.org/x/crypto/ed25519/ed25519.go delete mode 100644 vendor/golang.org/x/crypto/nacl/box/box.go delete mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go delete mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go delete mode 100644 vendor/golang.org/x/crypto/ssh/certs.go delete mode 100644 vendor/golang.org/x/crypto/ssh/channel.go delete mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go delete mode 100644 vendor/golang.org/x/crypto/ssh/client.go delete mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go delete mode 100644 vendor/golang.org/x/crypto/ssh/common.go delete mode 100644 vendor/golang.org/x/crypto/ssh/connection.go delete mode 100644 vendor/golang.org/x/crypto/ssh/doc.go delete mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go delete mode 100644 vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go delete mode 100644 vendor/golang.org/x/crypto/ssh/kex.go delete mode 100644 vendor/golang.org/x/crypto/ssh/keys.go delete mode 100644 vendor/golang.org/x/crypto/ssh/mac.go delete mode 100644 vendor/golang.org/x/crypto/ssh/messages.go delete mode 100644 vendor/golang.org/x/crypto/ssh/mux.go delete mode 100644 vendor/golang.org/x/crypto/ssh/server.go delete mode 100644 vendor/golang.org/x/crypto/ssh/session.go delete mode 100644 vendor/golang.org/x/crypto/ssh/ssh_gss.go delete mode 100644 vendor/golang.org/x/crypto/ssh/streamlocal.go delete mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go 
delete mode 100644 vendor/golang.org/x/crypto/ssh/transport.go delete mode 100644 vendor/golang.org/x/net/html/atom/atom.go delete mode 100644 vendor/golang.org/x/net/html/atom/table.go delete mode 100644 vendor/golang.org/x/net/html/const.go delete mode 100644 vendor/golang.org/x/net/html/doc.go delete mode 100644 vendor/golang.org/x/net/html/doctype.go delete mode 100644 vendor/golang.org/x/net/html/entity.go delete mode 100644 vendor/golang.org/x/net/html/escape.go delete mode 100644 vendor/golang.org/x/net/html/foreign.go delete mode 100644 vendor/golang.org/x/net/html/node.go delete mode 100644 vendor/golang.org/x/net/html/parse.go delete mode 100644 vendor/golang.org/x/net/html/render.go delete mode 100644 vendor/golang.org/x/net/html/token.go delete mode 100644 vendor/golang.org/x/net/publicsuffix/data/children delete mode 100644 vendor/golang.org/x/net/publicsuffix/data/nodes delete mode 100644 vendor/golang.org/x/net/publicsuffix/data/text delete mode 100644 vendor/golang.org/x/net/publicsuffix/list.go delete mode 100644 vendor/golang.org/x/net/publicsuffix/table.go delete mode 100644 vendor/golang.org/x/sys/plan9/asm.s delete mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_386.s delete mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s delete mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_arm.s delete mode 100644 vendor/golang.org/x/sys/plan9/const_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/dir_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/env_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/errors_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/mkall.sh delete mode 100644 vendor/golang.org/x/sys/plan9/mkerrors.sh delete mode 100644 vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh delete mode 100644 vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/pwd_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/race.go delete mode 100644 
vendor/golang.org/x/sys/plan9/race0.go delete mode 100644 vendor/golang.org/x/sys/plan9/str.go delete mode 100644 vendor/golang.org/x/sys/plan9/syscall.go delete mode 100644 vendor/golang.org/x/sys/plan9/syscall_plan9.go delete mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go delete mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go delete mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go delete mode 100644 vendor/golang.org/x/sys/plan9/zsysnum_plan9.go delete mode 100644 vendor/golang.org/x/term/CONTRIBUTING.md delete mode 100644 vendor/golang.org/x/term/LICENSE delete mode 100644 vendor/golang.org/x/term/PATENTS delete mode 100644 vendor/golang.org/x/term/README.md delete mode 100644 vendor/golang.org/x/term/codereview.cfg delete mode 100644 vendor/golang.org/x/term/term.go delete mode 100644 vendor/golang.org/x/term/term_plan9.go delete mode 100644 vendor/golang.org/x/term/term_unix.go delete mode 100644 vendor/golang.org/x/term/term_unix_bsd.go delete mode 100644 vendor/golang.org/x/term/term_unix_other.go delete mode 100644 vendor/golang.org/x/term/term_unsupported.go delete mode 100644 vendor/golang.org/x/term/term_windows.go delete mode 100644 vendor/golang.org/x/term/terminal.go delete mode 100644 vendor/gopkg.in/errgo.v1/LICENSE delete mode 100644 vendor/gopkg.in/errgo.v1/README.md delete mode 100644 vendor/gopkg.in/errgo.v1/errors.go delete mode 100644 vendor/gopkg.in/httprequest.v1/.travis.yml delete mode 100644 vendor/gopkg.in/httprequest.v1/LICENSE delete mode 100644 vendor/gopkg.in/httprequest.v1/README.md delete mode 100644 vendor/gopkg.in/httprequest.v1/client.go delete mode 100644 vendor/gopkg.in/httprequest.v1/error.go delete mode 100644 vendor/gopkg.in/httprequest.v1/fancyerror.go delete mode 100644 vendor/gopkg.in/httprequest.v1/handler.go delete mode 100644 vendor/gopkg.in/httprequest.v1/marshal.go delete mode 100644 vendor/gopkg.in/httprequest.v1/type.go delete mode 100644 
vendor/gopkg.in/httprequest.v1/unmarshal.go delete mode 100644 vendor/gopkg.in/macaroon.v2/.gitignore delete mode 100644 vendor/gopkg.in/macaroon.v2/.travis.yml delete mode 100644 vendor/gopkg.in/macaroon.v2/LICENSE delete mode 100644 vendor/gopkg.in/macaroon.v2/README.md delete mode 100644 vendor/gopkg.in/macaroon.v2/TODO delete mode 100644 vendor/gopkg.in/macaroon.v2/crypto.go delete mode 100644 vendor/gopkg.in/macaroon.v2/dependencies.tsv delete mode 100644 vendor/gopkg.in/macaroon.v2/macaroon.go delete mode 100644 vendor/gopkg.in/macaroon.v2/marshal-v1.go delete mode 100644 vendor/gopkg.in/macaroon.v2/marshal-v2.go delete mode 100644 vendor/gopkg.in/macaroon.v2/marshal.go delete mode 100644 vendor/gopkg.in/macaroon.v2/packet-v1.go delete mode 100644 vendor/gopkg.in/macaroon.v2/packet-v2.go delete mode 100644 vendor/gopkg.in/macaroon.v2/trace.go diff --git a/config/config.go b/config/config.go index adab7fa0..89335306 100644 --- a/config/config.go +++ b/config/config.go @@ -223,7 +223,6 @@ type Provider struct { // tokens to be used. This may happen if a provider has not yet been updated to support // JIT configuration. 
DisableJITConfig bool `toml:"disable_jit_config" json:"disable-jit-config"` - LXD LXD `toml:"lxd" json:"lxd"` External External `toml:"external" json:"external"` } @@ -233,10 +232,6 @@ func (p *Provider) Validate() error { } switch p.ProviderType { - case params.LXDProvider: - if err := p.LXD.Validate(); err != nil { - return errors.Wrap(err, "validating LXD provider info") - } case params.ExternalProvider: if err := p.External.Validate(); err != nil { return errors.Wrap(err, "validating external provider info") diff --git a/config/config_test.go b/config/config_test.go index e7f8489f..2281d196 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -20,7 +20,6 @@ import ( "testing" "time" - "github.com/cloudbase/garm/params" "github.com/cloudbase/garm/util/appdefaults" "github.com/stretchr/testify/require" ) @@ -76,15 +75,7 @@ func getDefaultDatabaseConfig(dir string) Database { } func getDefaultProvidersConfig() []Provider { - lxdConfig := getDefaultLXDConfig() - return []Provider{ - { - Name: "test_lxd", - ProviderType: params.LXDProvider, - Description: "test LXD provider", - LXD: lxdConfig, - }, - } + return []Provider{} } func getDefaultGithubConfig() []Github { diff --git a/config/lxd.go b/config/lxd.go deleted file mode 100644 index 8b8b1f7e..00000000 --- a/config/lxd.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2022 Cloudbase Solutions SRL -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package config - -import ( - "fmt" - "net/url" - "os" - - "github.com/pkg/errors" -) - -type LXDRemoteProtocol string -type LXDImageType string - -func (l LXDImageType) String() string { - return string(l) -} - -const ( - SimpleStreams LXDRemoteProtocol = "simplestreams" - LXDImageVirtualMachine LXDImageType = "virtual-machine" - LXDImageContainer LXDImageType = "container" -) - -// LXDImageRemote holds information about a remote server from which LXD can fetch -// OS images. Typically this will be a simplestreams server. -type LXDImageRemote struct { - Address string `toml:"addr" json:"addr"` - Public bool `toml:"public" json:"public"` - Protocol LXDRemoteProtocol `toml:"protocol" json:"protocol"` - InsecureSkipVerify bool `toml:"skip_verify" json:"skip-verify"` -} - -func (l *LXDImageRemote) Validate() error { - if l.Protocol != SimpleStreams { - // Only supports simplestreams for now. - return fmt.Errorf("invalid remote protocol %s. Supported protocols: %s", l.Protocol, SimpleStreams) - } - if l.Address == "" { - return fmt.Errorf("missing address") - } - - url, err := url.ParseRequestURI(l.Address) - if err != nil { - return errors.Wrap(err, "validating address") - } - - if url.Scheme != "http" && url.Scheme != "https" { - return fmt.Errorf("address must be http or https") - } - - return nil -} - -// LXD holds connection information for an LXD cluster. -type LXD struct { - // UnixSocket is the path on disk to the LXD unix socket. If defined, - // this is prefered over connecting via HTTPs. - UnixSocket string `toml:"unix_socket_path" json:"unix-socket-path"` - - // Project name is the name of the project in which this runner will create - // instances. If this option is not set, the default project will be used. - // The project used here, must have all required profiles created by you - // beforehand. For LXD, the "flavor" used in the runner definition for a pool - // equates to a profile in the desired project. 
- ProjectName string `toml:"project_name" json:"project-name"` - - // IncludeDefaultProfile specifies whether or not this provider will always add - // the "default" profile to any newly created instance. - IncludeDefaultProfile bool `toml:"include_default_profile" json:"include-default-profile"` - - // URL holds the URL of the remote LXD server. - // example: https://10.10.10.1:8443/ - URL string `toml:"url" json:"url"` - // ClientCertificate is the x509 client certificate path used for authentication. - ClientCertificate string `toml:"client_certificate" json:"client_certificate"` - // ClientKey is the key used for client certificate authentication. - ClientKey string `toml:"client_key" json:"client-key"` - // TLS certificate of the remote server. If not specified, the system CA is used. - TLSServerCert string `toml:"tls_server_certificate" json:"tls-server-certificate"` - // TLSCA is the TLS CA certificate when running LXD in PKI mode. - TLSCA string `toml:"tls_ca" json:"tls-ca"` - - // ImageRemotes is a map to a set of remote image repositories we can use to - // download images. - ImageRemotes map[string]LXDImageRemote `toml:"image_remotes" json:"image-remotes"` - - // SecureBoot enables secure boot for VMs spun up using this provider. 
- SecureBoot bool `toml:"secure_boot" json:"secure-boot"` - - // InstanceType allows you to choose between a virtual machine and a container - InstanceType LXDImageType `toml:"instance_type" json:"instance-type"` -} - -func (l *LXD) GetInstanceType() LXDImageType { - switch l.InstanceType { - case LXDImageVirtualMachine, LXDImageContainer: - return l.InstanceType - default: - return LXDImageVirtualMachine - } -} - -func (l *LXD) Validate() error { - if l.UnixSocket != "" { - if _, err := os.Stat(l.UnixSocket); err != nil { - return fmt.Errorf("could not access unix socket %s: %q", l.UnixSocket, err) - } - - return nil - } - - if l.URL == "" { - return fmt.Errorf("unix_socket or address must be specified") - } - - url, err := url.ParseRequestURI(l.URL) - if err != nil { - return fmt.Errorf("invalid LXD URL") - } - - if url.Scheme != "https" { - return fmt.Errorf("address must be https") - } - - if l.ClientCertificate == "" || l.ClientKey == "" { - return fmt.Errorf("client_certificate and client_key are mandatory") - } - - if _, err := os.Stat(l.ClientCertificate); err != nil { - return fmt.Errorf("failed to access client certificate %s: %q", l.ClientCertificate, err) - } - - if _, err := os.Stat(l.ClientKey); err != nil { - return fmt.Errorf("failed to access client key %s: %q", l.ClientKey, err) - } - - if l.TLSServerCert != "" { - if _, err := os.Stat(l.TLSServerCert); err != nil { - return fmt.Errorf("failed to access tls_server_certificate %s: %q", l.TLSServerCert, err) - } - } - - for name, val := range l.ImageRemotes { - if err := val.Validate(); err != nil { - return fmt.Errorf("remote %s is invalid: %s", name, err) - } - } - return nil -} diff --git a/config/lxd_test.go b/config/lxd_test.go deleted file mode 100644 index 1bba515d..00000000 --- a/config/lxd_test.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2022 Cloudbase Solutions SRL -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in 
compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package config - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func getDefaultLXDImageRemoteConfig() LXDImageRemote { - return LXDImageRemote{ - Address: "https://cloud-images.ubuntu.com/releases", - Public: true, - Protocol: SimpleStreams, - InsecureSkipVerify: false, - } -} - -func getDefaultLXDConfig() LXD { - remote := getDefaultLXDImageRemoteConfig() - return LXD{ - URL: "https://example.com:8443", - ProjectName: "default", - IncludeDefaultProfile: false, - ClientCertificate: "../testdata/lxd/certs/client.crt", - ClientKey: "../testdata/lxd/certs/client.key", - TLSServerCert: "../testdata/lxd/certs/servercert.crt", - ImageRemotes: map[string]LXDImageRemote{ - "default": remote, - }, - SecureBoot: false, - } -} - -func TestLXDRemote(t *testing.T) { - cfg := getDefaultLXDImageRemoteConfig() - - err := cfg.Validate() - require.Nil(t, err) -} - -func TestLXDRemoteEmptyAddress(t *testing.T) { - cfg := getDefaultLXDImageRemoteConfig() - - cfg.Address = "" - - err := cfg.Validate() - require.NotNil(t, err) - require.EqualError(t, err, "missing address") -} - -func TestLXDRemoteInvalidAddress(t *testing.T) { - cfg := getDefaultLXDImageRemoteConfig() - - cfg.Address = "bogus address" - err := cfg.Validate() - require.NotNil(t, err) - require.EqualError(t, err, "validating address: parse \"bogus address\": invalid URI for request") -} - -func TestLXDRemoteIvalidAddressScheme(t *testing.T) { - cfg := getDefaultLXDImageRemoteConfig() - - cfg.Address = "ftp://whatever" - err := cfg.Validate() - 
require.NotNil(t, err) - require.EqualError(t, err, "address must be http or https") -} - -func TestLXDConfig(t *testing.T) { - cfg := getDefaultLXDConfig() - err := cfg.Validate() - require.Nil(t, err) -} - -func TestLXDWithInvalidUnixSocket(t *testing.T) { - cfg := getDefaultLXDConfig() - - cfg.UnixSocket = "bogus unix socket" - err := cfg.Validate() - require.NotNil(t, err) - require.EqualError(t, err, "could not access unix socket bogus unix socket: \"stat bogus unix socket: no such file or directory\"") -} - -func TestMissingUnixSocketAndMissingURL(t *testing.T) { - cfg := getDefaultLXDConfig() - - cfg.URL = "" - cfg.UnixSocket = "" - - err := cfg.Validate() - require.NotNil(t, err) - require.EqualError(t, err, "unix_socket or address must be specified") -} - -func TestInvalidLXDURL(t *testing.T) { - cfg := getDefaultLXDConfig() - cfg.URL = "bogus" - - err := cfg.Validate() - require.NotNil(t, err) - require.EqualError(t, err, "invalid LXD URL") -} - -func TestLXDURLIsHTTPS(t *testing.T) { - cfg := getDefaultLXDConfig() - cfg.URL = "http://example.com" - - err := cfg.Validate() - require.NotNil(t, err) - require.EqualError(t, err, "address must be https") -} - -func TestMissingClientCertOrKey(t *testing.T) { - cfg := getDefaultLXDConfig() - cfg.ClientKey = "" - err := cfg.Validate() - require.NotNil(t, err) - require.EqualError(t, err, "client_certificate and client_key are mandatory") - - cfg = getDefaultLXDConfig() - cfg.ClientCertificate = "" - err = cfg.Validate() - require.NotNil(t, err) - require.EqualError(t, err, "client_certificate and client_key are mandatory") -} - -func TestLXDIvalidCertOrKeyPaths(t *testing.T) { - cfg := getDefaultLXDConfig() - cfg.ClientCertificate = "/i/am/not/here" - err := cfg.Validate() - require.NotNil(t, err) - require.EqualError(t, err, "failed to access client certificate /i/am/not/here: \"stat /i/am/not/here: no such file or directory\"") - - cfg.ClientCertificate = "../testdata/lxd/certs/client.crt" - cfg.ClientKey = 
"/me/neither" - - err = cfg.Validate() - require.NotNil(t, err) - require.EqualError(t, err, "failed to access client key /me/neither: \"stat /me/neither: no such file or directory\"") -} - -func TestLXDInvalidServerCertPath(t *testing.T) { - cfg := getDefaultLXDConfig() - cfg.TLSServerCert = "/not/a/valid/server/cert/path" - - err := cfg.Validate() - require.NotNil(t, err) - require.EqualError(t, err, "failed to access tls_server_certificate /not/a/valid/server/cert/path: \"stat /not/a/valid/server/cert/path: no such file or directory\"") -} - -func TestInvalidLXDImageRemotes(t *testing.T) { - cfg := getDefaultLXDConfig() - - cfg.ImageRemotes["default"] = LXDImageRemote{ - Protocol: LXDRemoteProtocol("bogus"), - } - - err := cfg.Validate() - require.NotNil(t, err) - require.EqualError(t, err, "remote default is invalid: invalid remote protocol bogus. Supported protocols: simplestreams") -} diff --git a/go.mod b/go.mod index 41862fe0..5ae41cd7 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,6 @@ require ( github.com/jedib0t/go-pretty/v6 v6.4.6 github.com/juju/clock v1.0.3 github.com/juju/retry v1.0.0 - github.com/lxc/lxd v0.0.0-20230325180147-8d608287b0ce github.com/manifoldco/promptui v0.9.0 github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 github.com/pkg/errors v0.9.1 @@ -43,12 +42,8 @@ require ( github.com/chzyer/readline v1.5.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 // indirect - github.com/frankban/quicktest v1.14.3 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-macaroon-bakery/macaroon-bakery/v3 v3.0.1 // indirect - github.com/go-macaroon-bakery/macaroonpb v1.0.0 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect @@ -63,11 +58,8 @@ 
require ( github.com/jinzhu/now v1.1.5 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/juju/errors v1.0.0 // indirect + github.com/juju/loggo v1.0.0 // indirect github.com/juju/testing v1.0.2 // indirect - github.com/juju/webbrowser v1.0.0 // indirect - github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/kr/fs v0.1.0 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.19 // indirect @@ -78,17 +70,11 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/pborman/uuid v1.2.1 // indirect - github.com/pkg/sftp v1.13.5 // indirect - github.com/pkg/xattr v0.4.9 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/robfig/cron/v3 v3.0.1 // indirect - github.com/rogpeppe/fastuuid v1.2.0 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 // indirect @@ -97,12 +83,8 @@ require ( go.opentelemetry.io/otel/trace v1.14.0 // indirect golang.org/x/net v0.14.0 // indirect golang.org/x/sys v0.11.0 // indirect - golang.org/x/term v0.11.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.31.0 // indirect - gopkg.in/errgo.v1 v1.0.1 // indirect - gopkg.in/httprequest.v1 v1.2.1 // indirect - gopkg.in/macaroon.v2 v2.1.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 
4ec6308b..c45808ea 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,3 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= @@ -9,7 +8,6 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -21,7 +19,6 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudbase/garm-provider-common v0.1.1-0.20231012061429-49001794e700 h1:ZCJ1zZ2WI/37ffzpRsu7t5zzShAMThhYsXw7bBNKBR0= github.com/cloudbase/garm-provider-common v0.1.1-0.20231012061429-49001794e700/go.mod h1:igxJRT3OlykERYc6ssdRQXcb+BCaeSfnucg6I0OSoDc= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ 
-29,28 +26,14 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 h1:fmFk0Wt3bBxxwZnu48jqMdaOR/IZ4vdtJFuaFV8MpIE= -github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3/go.mod h1:bJWSKrZyQvfTnb2OudyUjurSG4/edverV7n82+K3JiM= -github.com/frankban/quicktest v1.0.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBavT6B6CuGq2k= -github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20= -github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= -github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr 
v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-macaroon-bakery/macaroon-bakery/v3 v3.0.1 h1:uvQJoKTHrFFu8zxoaopNKedRzwdy3+8H72we4T/5cGs= -github.com/go-macaroon-bakery/macaroon-bakery/v3 v3.0.1/go.mod h1:H59IYeChwvD1po3dhGUPvq5na+4NVD7SJlbhGKvslr0= -github.com/go-macaroon-bakery/macaroonpb v1.0.0 h1:It9exBaRMZ9iix1iJ6gwzfwsDE6ExNuwtAJ9e09v6XE= -github.com/go-macaroon-bakery/macaroonpb v1.0.0/go.mod h1:UzrGOcbiwTXISFP2XDLDPjfhMINZa+fX/7A2lMd31zc= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= @@ -119,39 +102,21 @@ github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0kt github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod 
h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github/v55 v55.0.1-0.20230921135834-aa3fcbe7aabc h1:wZybOt4gfOPJmwpe3CZFJYoREaqgngGeo1Y29zZePhg= github.com/google/go-github/v55 v55.0.1-0.20230921135834-aa3fcbe7aabc/go.mod 
h1:dx9O5B1Z9+WYDRfSIkPdJ/jszShiNtl++jbgL/3OM2c= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -190,45 +155,32 @@ github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= github.com/juju/clock v1.0.3 h1:yJHIsWXeU8j3QcBdiess09SzfiXRRrsjKPn2whnMeds= github.com/juju/clock v1.0.3/go.mod h1:HIBvJ8kiV/n7UHwKuCkdYL4l/MDECztHR2sAvWDxxf0= github.com/juju/errors v1.0.0 h1:yiq7kjCLll1BiaRuNY53MGI0+EQ3rF6GB+wvboZDefM= github.com/juju/errors v1.0.0/go.mod h1:B5x9thDqx0wIMH3+aLIMP9HjItInYWObRovoCFM5Qe8= github.com/juju/loggo v1.0.0 h1:Y6ZMQOGR9Aj3BGkiWx7HBbIx6zNwNkxhVNOHU2i1bl0= github.com/juju/loggo v1.0.0/go.mod h1:NIXFioti1SmKAlKNuUwbMenNdef59IF52+ZzuOmHYkg= -github.com/juju/qthttptest v0.1.1/go.mod h1:aTlAv8TYaflIiTDIQYzxnl1QdPjAg8Q8qJMErpKy6A4= -github.com/juju/qthttptest v0.1.3 h1:M0HdpwsK/UTHRGRcIw5zvh5z+QOgdqyK+ecDMN+swwM= -github.com/juju/qthttptest v0.1.3/go.mod h1:2gayREyVSs/IovPmwYAtU+HZzuhDjytJQRRLzPTtDYE= github.com/juju/retry v1.0.0 h1:Tb1hFdDSPGLH/BGdYQOF7utQ9lA0ouVJX2imqgJK6tk= github.com/juju/retry v1.0.0/go.mod h1:SssN1eYeK3A2qjnFGTiVMbdzGJ2BfluaJblJXvuvgqA= github.com/juju/testing v1.0.2 
h1:OR90RqCd9CJONxXamZAjLknpZdtqDyxqW8IwCbgw3i4= github.com/juju/testing v1.0.2/go.mod h1:h3Vd2rzB57KrdsBEy6R7bmSKPzP76BnNavt7i8PerwQ= github.com/juju/utils/v3 v3.0.0 h1:Gg3n63mGPbBuoXCo+EPJuMi44hGZfloI8nlCIebHu2Q= github.com/juju/utils/v3 v3.0.0/go.mod h1:8csUcj1VRkfjNIRzBFWzLFCMLwLqsRWvkmhfVAUwbC4= -github.com/juju/webbrowser v1.0.0 h1:JLdmbFtCGY6Qf2jmS6bVaenJFGIFkdF1/BjUm76af78= -github.com/juju/webbrowser v1.0.0/go.mod h1:RwVlbBcF91Q4vS+iwlkJ6bZTE3EwlrjbYlM3WMVD6Bc= -github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lxc/lxd v0.0.0-20230325180147-8d608287b0ce h1:3zb1HRvOAHOMZ8VGTDEBkKpCUVlF28zalZcb7RFjMnE= -github.com/lxc/lxd v0.0.0-20230325180147-8d608287b0ce/go.mod h1:JJ1ShHzaOzMzU0B5TNcdI9+vq8Y45ijVeNYxE1wJ8zM= +github.com/lunixbochs/vtclean v0.0.0-20160125035106-4fbf7632a2c6/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -238,6 +190,8 @@ github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYt github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.0.6/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.0-20160806122752-66b8e73f3f5c/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= @@ -264,8 +218,6 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod 
h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= -github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -273,16 +225,11 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= -github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= -github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= -github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE= -github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= @@ -292,22 +239,15 @@ github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= -github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= -github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.9.0 
h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.7.1-0.20230723113155-fd865a44e3c4 h1:6be13R0JVLZN659yPzYYO0O1nYeSByDy5eqi85JKG/Y= github.com/spf13/cobra v1.7.1-0.20230723113155-fd865a44e3c4/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= @@ -339,7 +279,6 @@ github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23n github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= @@ -351,39 +290,22 @@ go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvx go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= -golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod 
h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -392,7 +314,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -401,22 +322,16 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.11.0 
h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -425,38 +340,13 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -464,25 +354,15 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY= gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk= -gopkg.in/errgo.v1 v1.0.1 h1:oQFRXzZ7CkBGdm1XZm/EbQYaYNNEElNBOd09M6cqNso= -gopkg.in/errgo.v1 v1.0.1/go.mod h1:3NjfXwocQRYAPTq4/fzX+CwUhPRcR/azYRhj8G+LqMo= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/httprequest.v1 v1.2.1 h1:pEPLMdF/gjWHnKxLpuCYaHFjc8vAB2wrYjXrqDVC16E= -gopkg.in/httprequest.v1 v1.2.1/go.mod h1:x2Otw96yda5+8+6ZeWwHIJTFkEHWP/qP8pJOzqEtWPM= -gopkg.in/macaroon.v2 v2.1.0 
h1:HZcsjBCzq9t0eBPMKqTN/uSN6JOm78ZJ2INbqcBQOUI= -gopkg.in/macaroon.v2 v2.1.0/go.mod h1:OUb+TQP/OP0WOerC2Jp/3CwhIKyIa9kQjuc7H24e6/o= -gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= -gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -506,5 +386,3 @@ gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.24.0/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= gorm.io/gorm v1.24.6 h1:wy98aq9oFEetsc4CAbKD2SoBCdMzsbSIvSUUFJuHi5s= gorm.io/gorm v1.24.6/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/runner/providers/lxd/images.go b/runner/providers/lxd/images.go deleted file mode 100644 index faf88b98..00000000 --- a/runner/providers/lxd/images.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2022 Cloudbase Solutions SRL -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package lxd - -import ( - "fmt" - "strings" - - runnerErrors "github.com/cloudbase/garm-provider-common/errors" - "github.com/cloudbase/garm/config" - - lxd "github.com/lxc/lxd/client" - "github.com/lxc/lxd/shared/api" - "github.com/pkg/errors" -) - -type image struct { - remotes map[string]config.LXDImageRemote -} - -// parseImageName parses the image name that comes in from the config and returns a -// remote. If no remote is configured with the given name, an error is returned. -func (i *image) parseImageName(imageName string) (config.LXDImageRemote, string, error) { - if !strings.Contains(imageName, ":") { - return config.LXDImageRemote{}, "", fmt.Errorf("image does not include a remote") - } - - details := strings.SplitN(imageName, ":", 2) - for remoteName, val := range i.remotes { - if remoteName == details[0] { - return val, details[1], nil - } - } - return config.LXDImageRemote{}, "", runnerErrors.ErrNotFound -} - -func (i *image) getLocalImageByAlias(imageName string, imageType config.LXDImageType, arch string, cli lxd.InstanceServer) (*api.Image, error) { - aliases, err := cli.GetImageAliasArchitectures(imageType.String(), imageName) - if err != nil { - return nil, errors.Wrapf(err, "resolving alias: %s", imageName) - } - - alias, ok := aliases[arch] - if !ok { - return nil, fmt.Errorf("no image found for arch %s and image type %s with name %s", arch, imageType, imageName) - } - - image, _, err := cli.GetImage(alias.Target) - if err != nil { - return nil, errors.Wrap(err, "fetching image details") - } - return image, nil -} - -func (i 
*image) getInstanceSource(imageName string, imageType config.LXDImageType, arch string, cli lxd.InstanceServer) (api.InstanceSource, error) { - instanceSource := api.InstanceSource{ - Type: "image", - } - if !strings.Contains(imageName, ":") { - // A remote was not specified, try to find an image using the imageName as - // an alias. - imageDetails, err := i.getLocalImageByAlias(imageName, imageType, arch, cli) - if err != nil { - return api.InstanceSource{}, errors.Wrap(err, "fetching image") - } - instanceSource.Fingerprint = imageDetails.Fingerprint - } else { - remote, parsedName, err := i.parseImageName(imageName) - if err != nil { - return api.InstanceSource{}, errors.Wrap(err, "parsing image name") - } - instanceSource.Alias = parsedName - instanceSource.Server = remote.Address - instanceSource.Protocol = string(remote.Protocol) - } - return instanceSource, nil -} diff --git a/runner/providers/lxd/lxd.go b/runner/providers/lxd/lxd.go deleted file mode 100644 index f5aba1b6..00000000 --- a/runner/providers/lxd/lxd.go +++ /dev/null @@ -1,530 +0,0 @@ -// Copyright 2022 Cloudbase Solutions SRL -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package lxd - -import ( - "context" - "fmt" - "log" - "sync" - "time" - - runnerErrors "github.com/cloudbase/garm-provider-common/errors" - "github.com/cloudbase/garm/config" - "github.com/cloudbase/garm/params" - "github.com/cloudbase/garm/runner/common" - - lxd "github.com/lxc/lxd/client" - "github.com/lxc/lxd/shared/api" - "github.com/pkg/errors" - - "github.com/cloudbase/garm-provider-common/cloudconfig" - commonParams "github.com/cloudbase/garm-provider-common/params" -) - -var _ common.Provider = &LXD{} - -const ( - // We look for this key in the config of the instances to determine if they are - // created by us or not. - controllerIDKeyName = "user.runner-controller-id" - poolIDKey = "user.runner-pool-id" - - // osTypeKeyName is the key we use in the instance config to indicate the OS - // platform a runner is supposed to have. This value is defined in the pool and - // passed into the provider as bootstrap params. - osTypeKeyName = "user.os-type" - - // osArchKeyNAme is the key we use in the instance config to indicate the OS - // architecture a runner is supposed to have. This value is defined in the pool and - // passed into the provider as bootstrap params. - osArchKeyNAme = "user.os-arch" -) - -var ( - // lxdToGithubArchMap translates LXD architectures to Github tools architectures. - // TODO: move this in a separate package. This will most likely be used - // by any other provider. 
- lxdToGithubArchMap map[string]string = map[string]string{ - "x86_64": "x64", - "amd64": "x64", - "armv7l": "arm", - "aarch64": "arm64", - "x64": "x64", - "arm": "arm", - "arm64": "arm64", - } - - configToLXDArchMap map[commonParams.OSArch]string = map[commonParams.OSArch]string{ - commonParams.Amd64: "x86_64", - commonParams.Arm64: "aarch64", - commonParams.Arm: "armv7l", - } - - lxdToConfigArch map[string]commonParams.OSArch = map[string]commonParams.OSArch{ - "x86_64": commonParams.Amd64, - "aarch64": commonParams.Arm64, - "armv7l": commonParams.Arm, - } -) - -const ( - DefaultProjectDescription = "This project was created automatically by garm to be used for github ephemeral action runners." - DefaultProjectName = "garm-project" -) - -func NewProvider(ctx context.Context, cfg *config.Provider, controllerID string) (common.Provider, error) { - if err := cfg.Validate(); err != nil { - return nil, errors.Wrap(err, "validating provider config") - } - - if cfg.ProviderType != params.LXDProvider { - return nil, fmt.Errorf("invalid provider type %s, expected %s", cfg.ProviderType, params.LXDProvider) - } - - provider := &LXD{ - ctx: ctx, - cfg: cfg, - controllerID: controllerID, - imageManager: &image{ - remotes: cfg.LXD.ImageRemotes, - }, - } - - return provider, nil -} - -type LXD struct { - // cfg is the provider config for this provider. - cfg *config.Provider - // ctx is the context. - ctx context.Context - // cli is the LXD client. 
- cli lxd.InstanceServer - // imageManager downloads images from remotes - imageManager *image - // controllerID is the ID of this controller - controllerID string - - mux sync.Mutex -} - -func (l *LXD) getCLI() (lxd.InstanceServer, error) { - l.mux.Lock() - defer l.mux.Unlock() - - if l.cli != nil { - return l.cli, nil - } - cli, err := getClientFromConfig(l.ctx, &l.cfg.LXD) - if err != nil { - return nil, errors.Wrap(err, "creating LXD client") - } - - _, _, err = cli.GetProject(projectName(l.cfg.LXD)) - if err != nil { - return nil, errors.Wrapf(err, "fetching project name: %s", projectName(l.cfg.LXD)) - } - cli = cli.UseProject(projectName(l.cfg.LXD)) - l.cli = cli - - return cli, nil -} - -func (l *LXD) getProfiles(flavor string) ([]string, error) { - ret := []string{} - if l.cfg.LXD.IncludeDefaultProfile { - ret = append(ret, "default") - } - - set := map[string]struct{}{} - - cli, err := l.getCLI() - if err != nil { - return nil, errors.Wrap(err, "fetching client") - } - - profiles, err := cli.GetProfileNames() - if err != nil { - return nil, errors.Wrap(err, "fetching profile names") - } - for _, profile := range profiles { - set[profile] = struct{}{} - } - - if _, ok := set[flavor]; !ok { - return nil, errors.Wrapf(runnerErrors.ErrNotFound, "looking for profile %s", flavor) - } - - ret = append(ret, flavor) - return ret, nil -} - -func (l *LXD) getTools(tools []commonParams.RunnerApplicationDownload, osType commonParams.OSType, architecture string) (commonParams.RunnerApplicationDownload, error) { - // Validate image OS. Linux only for now. - switch osType { - case commonParams.Linux: - default: - return commonParams.RunnerApplicationDownload{}, fmt.Errorf("this provider does not support OS type: %s", osType) - } - - // Find tools for OS/Arch. 
- for _, tool := range tools { - if tool.GetOS() == "" || tool.GetArchitecture() == "" { - continue - } - - // fmt.Println(*tool.Architecture, *tool.OS) - // fmt.Printf("image arch: %s --> osType: %s\n", image.Architecture, string(osType)) - if tool.GetArchitecture() == architecture && tool.GetOS() == string(osType) { - return tool, nil - } - - arch, ok := lxdToGithubArchMap[architecture] - if ok && arch == tool.GetArchitecture() && tool.GetOS() == string(osType) { - return tool, nil - } - } - return commonParams.RunnerApplicationDownload{}, fmt.Errorf("failed to find tools for OS %s and arch %s", osType, architecture) -} - -// sadly, the security.secureboot flag is a string encoded boolean. -func (l *LXD) secureBootEnabled() string { - if l.cfg.LXD.SecureBoot { - return "true" - } - return "false" -} - -func (l *LXD) getCreateInstanceArgs(bootstrapParams commonParams.BootstrapInstance, specs extraSpecs) (api.InstancesPost, error) { - if bootstrapParams.Name == "" { - return api.InstancesPost{}, runnerErrors.NewBadRequestError("missing name") - } - profiles, err := l.getProfiles(bootstrapParams.Flavor) - if err != nil { - return api.InstancesPost{}, errors.Wrap(err, "fetching profiles") - } - - arch, err := resolveArchitecture(bootstrapParams.OSArch) - if err != nil { - return api.InstancesPost{}, errors.Wrap(err, "fetching archictecture") - } - - instanceType := l.cfg.LXD.GetInstanceType() - instanceSource, err := l.imageManager.getInstanceSource(bootstrapParams.Image, instanceType, arch, l.cli) - if err != nil { - return api.InstancesPost{}, errors.Wrap(err, "getting instance source") - } - - tools, err := l.getTools(bootstrapParams.Tools, bootstrapParams.OSType, arch) - if err != nil { - return api.InstancesPost{}, errors.Wrap(err, "getting tools") - } - - bootstrapParams.UserDataOptions.DisableUpdatesOnBoot = specs.DisableUpdates - bootstrapParams.UserDataOptions.ExtraPackages = specs.ExtraPackages - bootstrapParams.UserDataOptions.EnableBootDebug = 
specs.EnableBootDebug - cloudCfg, err := cloudconfig.GetCloudConfig(bootstrapParams, tools, bootstrapParams.Name) - if err != nil { - return api.InstancesPost{}, errors.Wrap(err, "generating cloud-config") - } - - configMap := map[string]string{ - "user.user-data": cloudCfg, - osTypeKeyName: string(bootstrapParams.OSType), - osArchKeyNAme: string(bootstrapParams.OSArch), - controllerIDKeyName: l.controllerID, - poolIDKey: bootstrapParams.PoolID, - } - - if instanceType == config.LXDImageVirtualMachine { - configMap["security.secureboot"] = l.secureBootEnabled() - } - - args := api.InstancesPost{ - InstancePut: api.InstancePut{ - Architecture: arch, - Profiles: profiles, - Description: "Github runner provisioned by garm", - Config: configMap, - }, - Source: instanceSource, - Name: bootstrapParams.Name, - Type: api.InstanceType(instanceType), - } - return args, nil -} - -func (l *LXD) AsParams() params.Provider { - return params.Provider{ - Name: l.cfg.Name, - ProviderType: l.cfg.ProviderType, - Description: l.cfg.Description, - } -} - -func (l *LXD) launchInstance(createArgs api.InstancesPost) error { - cli, err := l.getCLI() - if err != nil { - return errors.Wrap(err, "fetching client") - } - // Get LXD to create the instance (background operation) - op, err := cli.CreateInstance(createArgs) - if err != nil { - return errors.Wrap(err, "creating instance") - } - - // Wait for the operation to complete - err = op.Wait() - if err != nil { - return errors.Wrap(err, "waiting for instance creation") - } - - // Get LXD to start the instance (background operation) - reqState := api.InstanceStatePut{ - Action: "start", - Timeout: -1, - } - - op, err = cli.UpdateInstanceState(createArgs.Name, reqState, "") - if err != nil { - return errors.Wrap(err, "starting instance") - } - - // Wait for the operation to complete - err = op.Wait() - if err != nil { - return errors.Wrap(err, "waiting for instance to start") - } - return nil -} - -// CreateInstance creates a new compute 
instance in the provider. -func (l *LXD) CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance) (commonParams.ProviderInstance, error) { - extraSpecs, err := parseExtraSpecsFromBootstrapParams(bootstrapParams) - if err != nil { - return commonParams.ProviderInstance{}, errors.Wrap(err, "parsing extra specs") - } - args, err := l.getCreateInstanceArgs(bootstrapParams, extraSpecs) - if err != nil { - return commonParams.ProviderInstance{}, errors.Wrap(err, "fetching create args") - } - - if err := l.launchInstance(args); err != nil { - return commonParams.ProviderInstance{}, errors.Wrap(err, "creating instance") - } - - ret, err := l.waitInstanceHasIP(ctx, args.Name) - if err != nil { - return commonParams.ProviderInstance{}, errors.Wrap(err, "fetching instance") - } - - return ret, nil -} - -// GetInstance will return details about one instance. -func (l *LXD) GetInstance(ctx context.Context, instanceName string) (commonParams.ProviderInstance, error) { - cli, err := l.getCLI() - if err != nil { - return commonParams.ProviderInstance{}, errors.Wrap(err, "fetching client") - } - instance, _, err := cli.GetInstanceFull(instanceName) - if err != nil { - if isNotFoundError(err) { - return commonParams.ProviderInstance{}, errors.Wrapf(runnerErrors.ErrNotFound, "fetching instance: %q", err) - } - return commonParams.ProviderInstance{}, errors.Wrap(err, "fetching instance") - } - - return lxdInstanceToAPIInstance(instance), nil -} - -// Delete instance will delete the instance in a provider. 
-func (l *LXD) DeleteInstance(ctx context.Context, instance string) error { - cli, err := l.getCLI() - if err != nil { - return errors.Wrap(err, "fetching client") - } - - if err := l.setState(instance, "stop", true); err != nil { - if isNotFoundError(err) { - log.Printf("received not found error when stopping instance %s", instance) - return nil - } - // I am not proud of this, but the drivers.ErrInstanceIsStopped from LXD pulls in - // a ton of CGO, linux specific dependencies, that don't make sense having - // in garm. - if !(errors.Cause(err).Error() == errInstanceIsStopped.Error()) { - return errors.Wrap(err, "stopping instance") - } - } - - opResponse := make(chan struct { - op lxd.Operation - err error - }) - var op lxd.Operation - go func() { - op, err := cli.DeleteInstance(instance) - opResponse <- struct { - op lxd.Operation - err error - }{op: op, err: err} - }() - - select { - case resp := <-opResponse: - if resp.err != nil { - if isNotFoundError(resp.err) { - log.Printf("received not found error when deleting instance %s", instance) - return nil - } - return errors.Wrap(resp.err, "removing instance") - } - op = resp.op - case <-time.After(time.Second * 60): - return errors.Wrapf(runnerErrors.ErrTimeout, "removing instance %s", instance) - } - - opTimeout, cancel := context.WithTimeout(context.Background(), time.Second*60) - defer cancel() - err = op.WaitContext(opTimeout) - if err != nil { - if isNotFoundError(err) { - log.Printf("received not found error when waiting for instance deletion %s", instance) - return nil - } - return errors.Wrap(err, "waiting for instance deletion") - } - return nil -} - -type listResponse struct { - instances []api.InstanceFull - err error -} - -// ListInstances will list all instances for a provider. 
-func (l *LXD) ListInstances(ctx context.Context, poolID string) ([]commonParams.ProviderInstance, error) { - cli, err := l.getCLI() - if err != nil { - return []commonParams.ProviderInstance{}, errors.Wrap(err, "fetching client") - } - - result := make(chan listResponse, 1) - - go func() { - // TODO(gabriel-samfira): if this blocks indefinitely, we will leak a goroutine. - // Convert the internal provider to an external one. Running the provider as an - // external process will allow us to not care if a goroutine leaks. Once a timeout - // is reached, the provider can just exit with an error. Something we can't do with - // internal providers. - instances, err := cli.GetInstancesFull(api.InstanceTypeAny) - result <- listResponse{ - instances: instances, - err: err, - } - }() - - var instances []api.InstanceFull - select { - case res := <-result: - if res.err != nil { - return []commonParams.ProviderInstance{}, errors.Wrap(res.err, "fetching instances") - } - instances = res.instances - case <-time.After(time.Second * 60): - return []commonParams.ProviderInstance{}, errors.Wrap(runnerErrors.ErrTimeout, "fetching instances from provider") - } - - ret := []commonParams.ProviderInstance{} - - for _, instance := range instances { - if id, ok := instance.ExpandedConfig[controllerIDKeyName]; ok && id == l.controllerID { - if poolID != "" { - id := instance.ExpandedConfig[poolIDKey] - if id != poolID { - // Pool ID was specified. Filter out instances belonging to other pools. - continue - } - } - ret = append(ret, lxdInstanceToAPIInstance(&instance)) - } - } - - return ret, nil -} - -// RemoveAllInstances will remove all instances created by this provider. 
-func (l *LXD) RemoveAllInstances(ctx context.Context) error { - instances, err := l.ListInstances(ctx, "") - if err != nil { - return errors.Wrap(err, "fetching instance list") - } - - for _, instance := range instances { - // TODO: remove in parallel - if err := l.DeleteInstance(ctx, instance.Name); err != nil { - return errors.Wrapf(err, "removing instance %s", instance.Name) - } - } - - return nil -} - -func (l *LXD) setState(instance, state string, force bool) error { - reqState := api.InstanceStatePut{ - Action: state, - Timeout: -1, - Force: force, - } - - cli, err := l.getCLI() - if err != nil { - return errors.Wrap(err, "fetching client") - } - - op, err := cli.UpdateInstanceState(instance, reqState, "") - if err != nil { - return errors.Wrapf(err, "setting state to %s", state) - } - ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Second*60) - defer cancel() - err = op.WaitContext(ctxTimeout) - if err != nil { - return errors.Wrapf(err, "waiting for instance to transition to state %s", state) - } - return nil -} - -// Stop shuts down the instance. -func (l *LXD) Stop(ctx context.Context, instance string, force bool) error { - return l.setState(instance, "stop", force) -} - -// Start boots up an instance. -func (l *LXD) Start(ctx context.Context, instance string) error { - return l.setState(instance, "start", false) -} - -// DisableJITConfig tells us if the provider explicitly disables JIT configuration and -// forces runner registration tokens to be used. This may happen if a provider has not yet -// been updated to support JIT configuration. 
-func (l *LXD) DisableJITConfig() bool { - if l.cfg == nil { - return false - } - return l.cfg.DisableJITConfig -} diff --git a/runner/providers/lxd/specs.go b/runner/providers/lxd/specs.go deleted file mode 100644 index 5aefe7c3..00000000 --- a/runner/providers/lxd/specs.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2023 Cloudbase Solutions SRL -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package lxd - -import ( - "encoding/json" - - commonParams "github.com/cloudbase/garm-provider-common/params" - "github.com/pkg/errors" -) - -type extraSpecs struct { - DisableUpdates bool `json:"disable_updates"` - ExtraPackages []string `json:"extra_packages"` - EnableBootDebug bool `json:"enable_boot_debug"` -} - -func parseExtraSpecsFromBootstrapParams(bootstrapParams commonParams.BootstrapInstance) (extraSpecs, error) { - specs := extraSpecs{} - if bootstrapParams.ExtraSpecs == nil { - return specs, nil - } - - if err := json.Unmarshal(bootstrapParams.ExtraSpecs, &specs); err != nil { - return specs, errors.Wrap(err, "unmarshaling extra specs") - } - return specs, nil -} diff --git a/runner/providers/lxd/util.go b/runner/providers/lxd/util.go deleted file mode 100644 index 2168bcec..00000000 --- a/runner/providers/lxd/util.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2022 Cloudbase Solutions SRL -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package lxd - -import ( - "context" - "database/sql" - "fmt" - "log" - "net" - "net/http" - "os" - "strings" - "time" - - commonParams "github.com/cloudbase/garm-provider-common/params" - - "github.com/cloudbase/garm-provider-common/util" - "github.com/cloudbase/garm/config" - - "github.com/juju/clock" - "github.com/juju/retry" - lxd "github.com/lxc/lxd/client" - "github.com/lxc/lxd/shared/api" - "github.com/pkg/errors" -) - -var ( - //lint:ignore ST1005 imported error from lxd - errInstanceIsStopped error = fmt.Errorf("The instance is already stopped") -) - -var httpResponseErrors = map[int][]error{ - http.StatusNotFound: {os.ErrNotExist, sql.ErrNoRows}, -} - -// isNotFoundError returns true if the error is considered a Not Found error. 
-func isNotFoundError(err error) bool { - if api.StatusErrorCheck(err, http.StatusNotFound) { - return true - } - - for _, checkErr := range httpResponseErrors[http.StatusNotFound] { - if errors.Is(err, checkErr) { - return true - } - } - - return false -} - -func lxdInstanceToAPIInstance(instance *api.InstanceFull) commonParams.ProviderInstance { - lxdOS, ok := instance.ExpandedConfig["image.os"] - if !ok { - log.Printf("failed to find OS in instance config") - } - - osType, err := util.OSToOSType(lxdOS) - if err != nil { - log.Printf("failed to find OS type for OS %s", lxdOS) - } - - if osType == "" { - osTypeFromTag, ok := instance.ExpandedConfig[osTypeKeyName] - if !ok { - log.Printf("failed to find OS type in fallback location") - } - osType = commonParams.OSType(osTypeFromTag) - } - - osRelease, ok := instance.ExpandedConfig["image.release"] - if !ok { - log.Printf("failed to find OS release instance config") - } - - state := instance.State - addresses := []commonParams.Address{} - if state.Network != nil { - for _, details := range state.Network { - for _, addr := range details.Addresses { - if addr.Scope != "global" { - continue - } - addresses = append(addresses, commonParams.Address{ - Address: addr.Address, - Type: commonParams.PublicAddress, - }) - } - } - } - - instanceArch, ok := lxdToConfigArch[instance.Architecture] - if !ok { - log.Printf("failed to find OS architecture") - } - - return commonParams.ProviderInstance{ - OSArch: instanceArch, - ProviderID: instance.Name, - Name: instance.Name, - OSType: osType, - OSName: strings.ToLower(lxdOS), - OSVersion: osRelease, - Addresses: addresses, - Status: lxdStatusToProviderStatus(state.Status), - } -} - -func lxdStatusToProviderStatus(status string) commonParams.InstanceStatus { - switch status { - case "Running": - return commonParams.InstanceRunning - case "Stopped": - return commonParams.InstanceStopped - default: - return commonParams.InstanceStatusUnknown - } -} - -func getClientFromConfig(ctx 
context.Context, cfg *config.LXD) (cli lxd.InstanceServer, err error) { - if cfg.UnixSocket != "" { - return lxd.ConnectLXDUnixWithContext(ctx, cfg.UnixSocket, nil) - } - - var srvCrtContents, tlsCAContents, clientCertContents, clientKeyContents []byte - - if cfg.TLSServerCert != "" { - srvCrtContents, err = os.ReadFile(cfg.TLSServerCert) - if err != nil { - return nil, errors.Wrap(err, "reading TLSServerCert") - } - } - - if cfg.TLSCA != "" { - tlsCAContents, err = os.ReadFile(cfg.TLSCA) - if err != nil { - return nil, errors.Wrap(err, "reading TLSCA") - } - } - - if cfg.ClientCertificate != "" { - clientCertContents, err = os.ReadFile(cfg.ClientCertificate) - if err != nil { - return nil, errors.Wrap(err, "reading ClientCertificate") - } - } - - if cfg.ClientKey != "" { - clientKeyContents, err = os.ReadFile(cfg.ClientKey) - if err != nil { - return nil, errors.Wrap(err, "reading ClientKey") - } - } - - connectArgs := lxd.ConnectionArgs{ - TLSServerCert: string(srvCrtContents), - TLSCA: string(tlsCAContents), - TLSClientCert: string(clientCertContents), - TLSClientKey: string(clientKeyContents), - } - - lxdCLI, err := lxd.ConnectLXD(cfg.URL, &connectArgs) - if err != nil { - return nil, errors.Wrap(err, "connecting to LXD") - } - - return lxdCLI, nil -} - -func projectName(cfg config.LXD) string { - if cfg.ProjectName != "" { - return cfg.ProjectName - } - return DefaultProjectName -} - -func resolveArchitecture(osArch commonParams.OSArch) (string, error) { - if string(osArch) == "" { - return configToLXDArchMap[commonParams.Amd64], nil - } - arch, ok := configToLXDArchMap[osArch] - if !ok { - return "", fmt.Errorf("architecture %s is not supported", osArch) - } - return arch, nil -} - -// waitDeviceActive is a function capable of figuring out when a Equinix Metal -// device is active -func (l *LXD) waitInstanceHasIP(ctx context.Context, instanceName string) (commonParams.ProviderInstance, error) { - var p commonParams.ProviderInstance - var errIPNotFound error = 
fmt.Errorf("ip not found") - err := retry.Call(retry.CallArgs{ - Func: func() error { - var err error - p, err = l.GetInstance(ctx, instanceName) - if err != nil { - return errors.Wrap(err, "fetching instance") - } - for _, addr := range p.Addresses { - ip := net.ParseIP(addr.Address) - if ip == nil { - continue - } - if ip.To4() == nil { - continue - } - return nil - } - return errIPNotFound - }, - Attempts: 20, - Delay: 5 * time.Second, - Clock: clock.WallClock, - }) - - if err != nil && err != errIPNotFound { - return commonParams.ProviderInstance{}, err - } - - return p, nil -} diff --git a/runner/providers/providers.go b/runner/providers/providers.go index 2e1f0d1b..5d4e58f1 100644 --- a/runner/providers/providers.go +++ b/runner/providers/providers.go @@ -22,7 +22,6 @@ import ( "github.com/cloudbase/garm/params" "github.com/cloudbase/garm/runner/common" "github.com/cloudbase/garm/runner/providers/external" - "github.com/cloudbase/garm/runner/providers/lxd" "github.com/pkg/errors" ) @@ -34,13 +33,6 @@ func LoadProvidersFromConfig(ctx context.Context, cfg config.Config, controllerI for _, providerCfg := range cfg.Providers { log.Printf("Loading provider %s", providerCfg.Name) switch providerCfg.ProviderType { - case params.LXDProvider: - conf := providerCfg - provider, err := lxd.NewProvider(ctx, &conf, controllerID) - if err != nil { - return nil, errors.Wrap(err, "creating provider") - } - providers[providerCfg.Name] = provider case params.ExternalProvider: conf := providerCfg provider, err := external.NewProvider(ctx, &conf, controllerID) @@ -48,6 +40,8 @@ func LoadProvidersFromConfig(ctx context.Context, cfg config.Config, controllerI return nil, errors.Wrap(err, "creating provider") } providers[providerCfg.Name] = provider + default: + return nil, errors.Errorf("unknown provider type %s", providerCfg.ProviderType) } } return providers, nil diff --git a/test/integration/config/config.toml b/test/integration/config/config.toml index 1a67a409..8d6d3e36 100644 
--- a/test/integration/config/config.toml +++ b/test/integration/config/config.toml @@ -25,30 +25,11 @@ passphrase = "${DB_PASSPHRASE}" [[provider]] name = "lxd_local" -provider_type = "lxd" +provider_type = "external" description = "Local LXD installation" -[provider.lxd] - unix_socket_path = "/var/snap/lxd/common/lxd/unix.socket" - include_default_profile = false - instance_type = "container" - secure_boot = false - project_name = "default" - [provider.lxd.image_remotes] - [provider.lxd.image_remotes.ubuntu] - addr = "https://cloud-images.ubuntu.com/releases" - public = true - protocol = "simplestreams" - skip_verify = false - [provider.lxd.image_remotes.ubuntu_daily] - addr = "https://cloud-images.ubuntu.com/daily" - public = true - protocol = "simplestreams" - skip_verify = false - [provider.lxd.image_remotes.images] - addr = "https://images.linuxcontainers.org" - public = true - protocol = "simplestreams" - skip_verify = false + [provider.external] + provider_executable = "${LXD_PROVIDER_EXECUTABLE}" + config_file = "${LXD_PROVIDER_CONFIG}" [[provider]] name = "test_external" diff --git a/test/integration/config/garm-provider-lxd.toml b/test/integration/config/garm-provider-lxd.toml new file mode 100644 index 00000000..9a6f94c8 --- /dev/null +++ b/test/integration/config/garm-provider-lxd.toml @@ -0,0 +1,21 @@ +unix_socket_path = "/var/snap/lxd/common/lxd/unix.socket" +include_default_profile = false +instance_type = "container" +secure_boot = false +project_name = "default" +[image_remotes] + [image_remotes.ubuntu] + addr = "https://cloud-images.ubuntu.com/releases" + public = true + protocol = "simplestreams" + skip_verify = false + [image_remotes.ubuntu_daily] + addr = "https://cloud-images.ubuntu.com/daily" + public = true + protocol = "simplestreams" + skip_verify = false + [image_remotes.images] + addr = "https://images.linuxcontainers.org" + public = true + protocol = "simplestreams" + skip_verify = false \ No newline at end of file diff --git 
a/test/integration/scripts/setup-garm.sh b/test/integration/scripts/setup-garm.sh index 91dddddf..e2bbcee9 100755 --- a/test/integration/scripts/setup-garm.sh +++ b/test/integration/scripts/setup-garm.sh @@ -6,6 +6,9 @@ BINARIES_DIR="$PWD/bin" CONTRIB_DIR="$PWD/contrib" CONFIG_DIR="$PWD/test/integration/config" CONFIG_DIR_PROV="$PWD/test/integration/provider" +PROVIDER_BIN_DIR="/opt/garm/providers.d/lxd" +LXD_PROVIDER_EXECUTABLE="$PROVIDER_BIN_DIR/garm-provider-lxd" +LXD_PROVIDER_CONFIG="$CONFIG_DIR/garm-provider-lxd.toml" if [[ ! -f $BINARIES_DIR/garm ]] || [[ ! -f $BINARIES_DIR/garm-cli ]]; then echo "ERROR: Please build GARM binaries first" @@ -43,6 +46,12 @@ export DB_PASSPHRASE="$(generate_secret)" # Group "adm" is the LXD daemon group as set by the "canonical/setup-lxd" GitHub action. sudo useradd --shell /usr/bin/false --system --groups adm --no-create-home garm +sudo mkdir -p $PROVIDER_BIN_DIR +git clone https://github.com/cloudbase/garm-provider-lxd ~/garm-provider-lxd +pushd ~/garm-provider-lxd +go build -o $PROVIDER_BIN_DIR/garm-provider-lxd +popd + sudo mkdir -p /etc/garm cat $CONFIG_DIR/config.toml | envsubst | sudo tee /etc/garm/config.toml sudo chown -R garm:garm /etc/garm diff --git a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/cloudconfig.go b/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/cloudconfig.go deleted file mode 100644 index fe468ec6..00000000 --- a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/cloudconfig.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2022 Cloudbase Solutions SRL -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package cloudconfig - -import ( - "crypto/x509" - "encoding/base64" - "fmt" - "strings" - "sync" - - "github.com/cloudbase/garm-provider-common/defaults" - - "github.com/pkg/errors" - "gopkg.in/yaml.v3" -) - -func NewDefaultCloudInitConfig() *CloudInit { - return &CloudInit{ - PackageUpgrade: true, - Packages: []string{ - "curl", - "tar", - }, - SystemInfo: &SystemInfo{ - DefaultUser: DefaultUser{ - Name: defaults.DefaultUser, - Home: fmt.Sprintf("/home/%s", defaults.DefaultUser), - Shell: defaults.DefaultUserShell, - Groups: defaults.DefaultUserGroups, - Sudo: "ALL=(ALL) NOPASSWD:ALL", - }, - }, - } -} - -type DefaultUser struct { - Name string `yaml:"name"` - Home string `yaml:"home"` - Shell string `yaml:"shell"` - Groups []string `yaml:"groups,omitempty"` - Sudo string `yaml:"sudo"` -} - -type SystemInfo struct { - DefaultUser DefaultUser `yaml:"default_user"` -} - -type File struct { - Encoding string `yaml:"encoding"` - Content string `yaml:"content"` - Owner string `yaml:"owner"` - Path string `yaml:"path"` - Permissions string `yaml:"permissions"` -} - -type CloudInit struct { - mux sync.Mutex - - PackageUpgrade bool `yaml:"package_upgrade"` - Packages []string `yaml:"packages,omitempty"` - SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys,omitempty"` - SystemInfo *SystemInfo `yaml:"system_info,omitempty"` - RunCmd []string `yaml:"runcmd,omitempty"` - WriteFiles []File `yaml:"write_files,omitempty"` - CACerts CACerts `yaml:"ca-certs,omitempty"` -} - -type CACerts struct { - RemoveDefaults bool `yaml:"remove-defaults"` - Trusted 
[]string `yaml:"trusted"` -} - -func (c *CloudInit) AddCACert(cert []byte) error { - c.mux.Lock() - defer c.mux.Unlock() - - if cert == nil { - return nil - } - - roots := x509.NewCertPool() - if ok := roots.AppendCertsFromPEM(cert); !ok { - return fmt.Errorf("failed to parse CA cert bundle") - } - c.CACerts.Trusted = append(c.CACerts.Trusted, string(cert)) - - return nil -} - -func (c *CloudInit) AddSSHKey(keys ...string) { - c.mux.Lock() - defer c.mux.Unlock() - - // TODO(gabriel-samfira): Validate the SSH public key. - for _, key := range keys { - found := false - for _, val := range c.SSHAuthorizedKeys { - if val == key { - found = true - break - } - } - if !found { - c.SSHAuthorizedKeys = append(c.SSHAuthorizedKeys, key) - } - } -} - -func (c *CloudInit) AddPackage(pkgs ...string) { - c.mux.Lock() - defer c.mux.Unlock() - - for _, pkg := range pkgs { - found := false - for _, val := range c.Packages { - if val == pkg { - found = true - break - } - } - if !found { - c.Packages = append(c.Packages, pkg) - } - } -} - -func (c *CloudInit) AddRunCmd(cmd string) { - c.mux.Lock() - defer c.mux.Unlock() - - c.RunCmd = append(c.RunCmd, cmd) -} - -func (c *CloudInit) AddFile(contents []byte, path, owner, permissions string) { - c.mux.Lock() - defer c.mux.Unlock() - - for _, val := range c.WriteFiles { - if val.Path == path { - return - } - } - - file := File{ - Encoding: "b64", - Content: base64.StdEncoding.EncodeToString(contents), - Owner: owner, - Permissions: permissions, - Path: path, - } - c.WriteFiles = append(c.WriteFiles, file) -} - -func (c *CloudInit) Serialize() (string, error) { - c.mux.Lock() - defer c.mux.Unlock() - - ret := []string{ - "#cloud-config", - } - - asYaml, err := yaml.Marshal(c) - if err != nil { - return "", errors.Wrap(err, "marshaling to yaml") - } - - ret = append(ret, string(asYaml)) - return strings.Join(ret, "\n"), nil -} diff --git a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/templates.go 
b/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/templates.go deleted file mode 100644 index f813e5ff..00000000 --- a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/templates.go +++ /dev/null @@ -1,539 +0,0 @@ -// Copyright 2022 Cloudbase Solutions SRL -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package cloudconfig - -import ( - "bytes" - "fmt" - "text/template" - - "github.com/cloudbase/garm-provider-common/params" - "github.com/pkg/errors" -) - -var CloudConfigTemplate = `#!/bin/bash - -set -e -set -o pipefail - -{{- if .EnableBootDebug }} -set -x -{{- end }} - -CALLBACK_URL="{{ .CallbackURL }}" -METADATA_URL="{{ .MetadataURL }}" -BEARER_TOKEN="{{ .CallbackToken }}" - -if [ -z "$METADATA_URL" ];then - echo "no token is available and METADATA_URL is not set" - exit 1 -fi - -function call() { - PAYLOAD="$1" - [[ $CALLBACK_URL =~ ^(.*)/status$ ]] || CALLBACK_URL="${CALLBACK_URL}/status" - curl --retry 5 --retry-delay 5 --retry-connrefused --fail -s -X POST -d "${PAYLOAD}" -H 'Accept: application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${CALLBACK_URL}" || echo "failed to call home: exit code ($?)" -} - -function sendStatus() { - MSG="$1" - call "{\"status\": \"installing\", \"message\": \"$MSG\"}" -} - -{{- if .UseJITConfig }} -function success() { - MSG="$1" - call "{\"status\": \"idle\", \"message\": \"$MSG\"}" -} -{{- else}} -function success() { - MSG="$1" - ID=$2 - call "{\"status\": \"idle\", \"message\": 
\"$MSG\", \"agent_id\": $ID}" -} -{{- end}} - -function fail() { - MSG="$1" - call "{\"status\": \"failed\", \"message\": \"$MSG\"}" - exit 1 -} - -# This will echo the version number in the filename. Given a file name like: actions-runner-osx-x64-2.299.1.tar.gz -# this will output: 2.299.1 -function getRunnerVersion() { - FILENAME="{{ .FileName }}" - [[ $FILENAME =~ ([0-9]+\.[0-9]+\.[0-9+]) ]] - echo $BASH_REMATCH -} - -function getCachedToolsPath() { - CACHED_RUNNER="/opt/cache/actions-runner/latest" - if [ -d "$CACHED_RUNNER" ];then - echo "$CACHED_RUNNER" - return 0 - fi - - VERSION=$(getRunnerVersion) - if [ -z "$VERSION" ]; then - return 0 - fi - - CACHED_RUNNER="/opt/cache/actions-runner/$VERSION" - if [ -d "$CACHED_RUNNER" ];then - echo "$CACHED_RUNNER" - return 0 - fi - return 0 -} - -function downloadAndExtractRunner() { - sendStatus "downloading tools from {{ .DownloadURL }}" - if [ ! -z "{{ .TempDownloadToken }}" ]; then - TEMP_TOKEN="Authorization: Bearer {{ .TempDownloadToken }}" - fi - curl --retry 5 --retry-delay 5 --retry-connrefused --fail -L -H "${TEMP_TOKEN}" -o "/home/{{ .RunnerUsername }}/{{ .FileName }}" "{{ .DownloadURL }}" || fail "failed to download tools" - mkdir -p /home/{{ .RunnerUsername }}/actions-runner || fail "failed to create actions-runner folder" - sendStatus "extracting runner" - tar xf "/home/{{ .RunnerUsername }}/{{ .FileName }}" -C /home/{{ .RunnerUsername }}/actions-runner/ || fail "failed to extract runner" - # chown {{ .RunnerUsername }}:{{ .RunnerGroup }} -R /home/{{ .RunnerUsername }}/actions-runner/ || fail "failed to change owner" -} - -CACHED_RUNNER=$(getCachedToolsPath) -if [ -z "$CACHED_RUNNER" ];then - downloadAndExtractRunner - sendStatus "installing dependencies" - cd /home/{{ .RunnerUsername }}/actions-runner - sudo ./bin/installdependencies.sh || fail "failed to install dependencies" -else - sendStatus "using cached runner found in $CACHED_RUNNER" - sudo cp -a "$CACHED_RUNNER" "/home/{{ .RunnerUsername 
}}/actions-runner" - sudo chown {{ .RunnerUsername }}:{{ .RunnerGroup }} -R "/home/{{ .RunnerUsername }}/actions-runner" || fail "failed to change owner" - cd /home/{{ .RunnerUsername }}/actions-runner -fi - - -sendStatus "configuring runner" -{{- if .UseJITConfig }} -function getRunnerFile() { - curl --retry 5 --retry-delay 5 \ - --retry-connrefused --fail -s \ - -X GET -H 'Accept: application/json' \ - -H "Authorization: Bearer ${BEARER_TOKEN}" \ - "${METADATA_URL}/$1" -o "$2" -} - -sendStatus "downloading JIT credentials" -getRunnerFile "credentials/runner" "/home/{{ .RunnerUsername }}/actions-runner/.runner" || fail "failed to get runner file" -getRunnerFile "credentials/credentials" "/home/{{ .RunnerUsername }}/actions-runner/.credentials" || fail "failed to get credentials file" -getRunnerFile "credentials/credentials_rsaparams" "/home/{{ .RunnerUsername }}/actions-runner/.credentials_rsaparams" || fail "failed to get credentials_rsaparams file" -getRunnerFile "system/service-name" "/home/{{ .RunnerUsername }}/actions-runner/.service" || fail "failed to get service name file" -sed -i 's/$/\.service/' /home/{{ .RunnerUsername }}/actions-runner/.service - -SVC_NAME=$(cat /home/{{ .RunnerUsername }}/actions-runner/.service) - -sendStatus "generating systemd unit file" -getRunnerFile "systemd/unit-file?runAsUser={{ .RunnerUsername }}" "$SVC_NAME" || fail "failed to get service file" -sudo mv $SVC_NAME /etc/systemd/system/ || fail "failed to move service file" - -sendStatus "enabling runner service" -cp /home/{{ .RunnerUsername }}/actions-runner/bin/runsvc.sh /home/{{ .RunnerUsername }}/actions-runner/ || fail "failed to copy runsvc.sh" -sudo chown {{ .RunnerUsername }}:{{ .RunnerGroup }} -R /home/{{ .RunnerUsername }} || fail "failed to change owner" -sudo systemctl daemon-reload || fail "failed to reload systemd" -sudo systemctl enable $SVC_NAME -{{- else}} - -GITHUB_TOKEN=$(curl --retry 5 --retry-delay 5 --retry-connrefused --fail -s -X GET -H 'Accept: 
application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${METADATA_URL}/runner-registration-token/") - -set +e -attempt=1 -while true; do - ERROUT=$(mktemp) - {{- if .GitHubRunnerGroup }} - ./config.sh --unattended --url "{{ .RepoURL }}" --token "$GITHUB_TOKEN" --runnergroup {{.GitHubRunnerGroup}} --name "{{ .RunnerName }}" --labels "{{ .RunnerLabels }}" --ephemeral 2>$ERROUT - {{- else}} - ./config.sh --unattended --url "{{ .RepoURL }}" --token "$GITHUB_TOKEN" --name "{{ .RunnerName }}" --labels "{{ .RunnerLabels }}" --ephemeral 2>$ERROUT - {{- end}} - if [ $? -eq 0 ]; then - rm $ERROUT || true - sendStatus "runner successfully configured after $attempt attempt(s)" - break - fi - LAST_ERR=$(cat $ERROUT) - echo "$LAST_ERR" - - # if the runner is already configured, remove it and try again. In the past configuring a runner - # managed to register it but timed out later, resulting in an error. - ./config.sh remove --token "$GITHUB_TOKEN" || true - - if [ $attempt -gt 5 ];then - rm $ERROUT || true - fail "failed to configure runner: $LAST_ERR" - fi - - sendStatus "failed to configure runner (attempt $attempt): $LAST_ERR (retrying in 5 seconds)" - attempt=$((attempt+1)) - rm $ERROUT || true - sleep 5 -done -set -e - -sendStatus "installing runner service" -sudo ./svc.sh install {{ .RunnerUsername }} || fail "failed to install service" -{{- end}} - -if [ -e "/sys/fs/selinux" ];then - sudo chcon -h user_u:object_r:bin_t /home/runner/ || fail "failed to change selinux context" - sudo chcon -R -h {{ .RunnerUsername }}:object_r:bin_t /home/runner/* || fail "failed to change selinux context" -fi - -{{- if .UseJITConfig }} -sudo systemctl start $SVC_NAME || fail "failed to start service" -success "runner successfully installed" -{{- else}} -sendStatus "starting service" -sudo ./svc.sh start || fail "failed to start service" - -set +e -AGENT_ID=$(grep "agentId" /home/{{ .RunnerUsername }}/actions-runner/.runner | tr -d -c 0-9) -if [ $? 
-ne 0 ];then - fail "failed to get agent ID" -fi -set -e -success "runner successfully installed" $AGENT_ID -{{- end}} -` - -var WindowsSetupScriptTemplate = `#ps1_sysnative -Param( - [Parameter(Mandatory=$false)] - [string]$Token="{{.CallbackToken}}" -) - -$ErrorActionPreference="Stop" - -function Invoke-FastWebRequest { - [CmdletBinding()] - Param( - [Parameter(Mandatory=$True,ValueFromPipeline=$true,Position=0)] - [System.Uri]$Uri, - [Parameter(Position=1)] - [string]$OutFile, - [Hashtable]$Headers=@{}, - [switch]$SkipIntegrityCheck=$false - ) - PROCESS - { - if(!([System.Management.Automation.PSTypeName]'System.Net.Http.HttpClient').Type) - { - $assembly = [System.Reflection.Assembly]::LoadWithPartialName("System.Net.Http") - } - - if(!$OutFile) { - $OutFile = $Uri.PathAndQuery.Substring($Uri.PathAndQuery.LastIndexOf("/") + 1) - if(!$OutFile) { - throw "The ""OutFile"" parameter needs to be specified" - } - } - - $fragment = $Uri.Fragment.Trim('#') - if ($fragment) { - $details = $fragment.Split("=") - $algorithm = $details[0] - $hash = $details[1] - } - - if (!$SkipIntegrityCheck -and $fragment -and (Test-Path $OutFile)) { - try { - return (Test-FileIntegrity -File $OutFile -Algorithm $algorithm -ExpectedHash $hash) - } catch { - Remove-Item $OutFile - } - } - - $client = new-object System.Net.Http.HttpClient - foreach ($k in $Headers.Keys){ - $client.DefaultRequestHeaders.Add($k, $Headers[$k]) - } - $task = $client.GetStreamAsync($Uri) - $response = $task.Result - if($task.IsFaulted) { - $msg = "Request for URL '{0}' is faulted. Task status: {1}." 
-f @($Uri, $task.Status) - if($task.Exception) { - $msg += "Exception details: {0}" -f @($task.Exception) - } - Throw $msg - } - $outStream = New-Object IO.FileStream $OutFile, Create, Write, None - - try { - $totRead = 0 - $buffer = New-Object Byte[] 1MB - while (($read = $response.Read($buffer, 0, $buffer.Length)) -gt 0) { - $totRead += $read - $outStream.Write($buffer, 0, $read); - } - } - finally { - $outStream.Close() - } - if(!$SkipIntegrityCheck -and $fragment) { - Test-FileIntegrity -File $OutFile -Algorithm $algorithm -ExpectedHash $hash - } - } -} - -function Import-Certificate() { - [CmdletBinding()] - param ( - [parameter(Mandatory=$true)] - $CertificateData, - [parameter(Mandatory=$false)] - [System.Security.Cryptography.X509Certificates.StoreLocation]$StoreLocation="LocalMachine", - [parameter(Mandatory=$false)] - [System.Security.Cryptography.X509Certificates.StoreName]$StoreName="TrustedPublisher" - ) - PROCESS - { - $store = New-Object System.Security.Cryptography.X509Certificates.X509Store( - $StoreName, $StoreLocation) - $store.Open([System.Security.Cryptography.X509Certificates.OpenFlags]::ReadWrite) - $cert = [System.Security.Cryptography.X509Certificates.X509Certificate2]::new($CertificateData) - $store.Add($cert) - } -} - -function Invoke-APICall() { - [CmdletBinding()] - param ( - [parameter(Mandatory=$true)] - [object]$Payload, - [parameter(Mandatory=$true)] - [string]$CallbackURL - ) - PROCESS{ - Invoke-WebRequest -UseBasicParsing -Method Post -Headers @{"Accept"="application/json"; "Authorization"="Bearer $Token"} -Uri $CallbackURL -Body (ConvertTo-Json $Payload) | Out-Null - } -} - -function Update-GarmStatus() { - [CmdletBinding()] - param ( - [parameter(Mandatory=$true)] - [string]$Message, - [parameter(Mandatory=$false)] - [int64]$AgentID=0, - [parameter(Mandatory=$false)] - [string]$Status="installing", - [parameter(Mandatory=$true)] - [string]$CallbackURL - ) - PROCESS{ - $body = @{ - "status"=$Status - "message"=$Message - } - - if 
($AgentID -ne 0) { - $body["AgentID"] = $AgentID - } - Invoke-APICall -Payload $body -CallbackURL $CallbackURL | Out-Null - } -} - -function Invoke-GarmSuccess() { - [CmdletBinding()] - param ( - [parameter(Mandatory=$true)] - [string]$Message, - [parameter(Mandatory=$true)] - [int64]$AgentID, - [parameter(Mandatory=$true)] - [string]$CallbackURL - ) - PROCESS{ - Update-GarmStatus -Message $Message -AgentID $AgentID -CallbackURL $CallbackURL -Status "idle" | Out-Null - } -} - -function Invoke-GarmFailure() { - [CmdletBinding()] - param ( - [parameter(Mandatory=$true)] - [string]$Message, - [parameter(Mandatory=$true)] - [string]$CallbackURL - ) - PROCESS{ - Update-GarmStatus -Message $Message -CallbackURL $CallbackURL -Status "failed" | Out-Null - Throw $Message - } -} - -$GHRunnerGroup = "{{.GitHubRunnerGroup}}" - -function Install-Runner() { - $CallbackURL="{{.CallbackURL}}" - if (!$CallbackURL.EndsWith("/status")) { - $CallbackURL = "$CallbackURL/status" - } - - if ($Token.Length -eq 0) { - Throw "missing callback authentication token" - } - try { - $MetadataURL="{{.MetadataURL}}" - $DownloadURL="{{.DownloadURL}}" - if($MetadataURL -eq ""){ - Throw "missing metadata URL" - } - - $bundle = wget -UseBasicParsing -Headers @{"Accept"="application/json"; "Authorization"="Bearer $Token"} -Uri $MetadataURL/system/cert-bundle - $converted = ConvertFrom-Json $bundle - foreach ($i in $converted.root_certificates.psobject.Properties){ - $data = [System.Convert]::FromBase64String($i.Value) - Import-Certificate -CertificateData $data -StoreName Root -StoreLocation LocalMachine - } - - Update-GarmStatus -CallbackURL $CallbackURL -Message "downloading tools from $DownloadURL" - - $downloadToken="{{.TempDownloadToken}}" - $DownloadTokenHeaders=@{} - if ($downloadToken.Length -gt 0) { - $DownloadTokenHeaders=@{ - "Authorization"="Bearer $downloadToken" - } - } - $downloadPath = Join-Path $env:TMP {{.FileName}} - Invoke-FastWebRequest -Uri $DownloadURL -OutFile $downloadPath 
-Headers $DownloadTokenHeaders - - $runnerDir = "C:\runner" - mkdir $runnerDir - - Update-GarmStatus -CallbackURL $CallbackURL -Message "extracting runner" - Add-Type -AssemblyName System.IO.Compression.FileSystem - [System.IO.Compression.ZipFile]::ExtractToDirectory($downloadPath, "$runnerDir") - - Update-GarmStatus -CallbackURL $CallbackURL -Message "configuring and starting runner" - cd $runnerDir - - {{- if .UseJITConfig }} - Update-GarmStatus -CallbackURL $CallbackURL -Message "downloading JIT credentials" - wget -UseBasicParsing -Headers @{"Accept"="application/json"; "Authorization"="Bearer $Token"} -Uri $MetadataURL/credentials/runner -OutFile (Join-Path $runnerDir ".runner") - wget -UseBasicParsing -Headers @{"Accept"="application/json"; "Authorization"="Bearer $Token"} -Uri $MetadataURL/credentials/credentials -OutFile (Join-Path $runnerDir ".credentials") - - Add-Type -AssemblyName System.Security - $rsaData = (wget -UseBasicParsing -Headers @{"Accept"="application/json"; "Authorization"="Bearer $Token"} -Uri $MetadataURL/credentials/credentials_rsaparams) - $encodedBytes = [System.Text.Encoding]::UTF8.GetBytes($rsaData) - $protectedBytes = [Security.Cryptography.ProtectedData]::Protect( $encodedBytes, $null, [Security.Cryptography.DataProtectionScope]::LocalMachine ) - [System.IO.File]::WriteAllBytes((Join-Path $runnerDir ".credentials_rsaparams"), $protectedBytes) - - $serviceNameFile = (Join-Path $runnerDir ".service") - wget -UseBasicParsing -Headers @{"Accept"="application/json"; "Authorization"="Bearer $Token"} -Uri $MetadataURL/system/service-name -OutFile $serviceNameFile - - Update-GarmStatus -CallbackURL $CallbackURL -Message "Creating system service" - $SVC_NAME=(gc -raw $serviceNameFile) - New-Service -Name "$SVC_NAME" -BinaryPathName "C:\runner\bin\RunnerService.exe" -DisplayName "$SVC_NAME" -Description "GitHub Actions Runner ($SVC_NAME)" -StartupType Automatic - Start-Service "$SVC_NAME" - Update-GarmStatus -Message "runner successfully 
installed" -CallbackURL $CallbackURL -Status "idle" | Out-Null - - {{- else }} - $GithubRegistrationToken = Invoke-WebRequest -UseBasicParsing -Headers @{"Accept"="application/json"; "Authorization"="Bearer $Token"} -Uri $MetadataURL/runner-registration-token/ - {{- if .GitHubRunnerGroup }} - ./config.cmd --unattended --url "{{ .RepoURL }}" --token $GithubRegistrationToken --runnergroup {{.GitHubRunnerGroup}} --name "{{ .RunnerName }}" --labels "{{ .RunnerLabels }}" --ephemeral --runasservice - {{- else}} - ./config.cmd --unattended --url "{{ .RepoURL }}" --token $GithubRegistrationToken --name "{{ .RunnerName }}" --labels "{{ .RunnerLabels }}" --ephemeral --runasservice - {{- end}} - - $agentInfoFile = Join-Path $runnerDir ".runner" - $agentInfo = ConvertFrom-Json (gc -raw $agentInfoFile) - Invoke-GarmSuccess -CallbackURL $CallbackURL -Message "runner successfully installed" -AgentID $agentInfo.agentId - {{- end }} - } catch { - Invoke-GarmFailure -CallbackURL $CallbackURL -Message $_ - } -} -Install-Runner -` - -// InstallRunnerParams holds the parameters needed to render the runner install script. -type InstallRunnerParams struct { - // FileName is the name of the file that will be downloaded from the download URL. - // This will be the runner archive downloaded from GitHub. - FileName string - // DownloadURL is the URL from which the runner archive will be downloaded. - DownloadURL string - // RunnerUsername is the username of the user that will run the runner service. - RunnerUsername string - // RunnerGroup is the group of the user that will run the runner service. - RunnerGroup string - // RepoURL is the URL or the github repo the github runner agent needs to configure itself. - RepoURL string - // MetadataURL is the URL where instances can fetch information needed to set themselves up. - // This URL is set in the GARM config file. - MetadataURL string - // RunnerName is the name of the runner. GARM will use this to register the runner with GitHub. 
- RunnerName string - // RunnerLabels is a comma separated list of labels that will be added to the runner. - RunnerLabels string - // CallbackURL is the URL where the instance can send a post, signaling progress or status. - // This URL is set in the GARM config file. - CallbackURL string - // CallbackToken is the token that needs to be set by the instance in the headers in order to call - // the CallbackURL. - CallbackToken string - // TempDownloadToken is the token that needs to be set by the instance in the headers in order to download - // the githun runner. This is usually needed when using garm against a GHES instance. - TempDownloadToken string - // CABundle is a CA certificate bundle which will be sent to instances and which will tipically be installed - // as a system wide trusted root CA by either cloud-init or whatever mechanism the provider will use to set - // up the runner. - CABundle string - // GitHubRunnerGroup is the github runner group in which the newly installed runner should be added to. - GitHubRunnerGroup string - // EnableBootDebug will enable bash debug mode. - EnableBootDebug bool - // ExtraContext is a map of extra context that will be passed to the runner install template. - // This option is useful for situations in which you're supplying your own template and you need - // to pass in information that is not available in the default template. - ExtraContext map[string]string - // UseJITConfig indicates whether to attempt to configure the runner using JIT or a registration token. 
- UseJITConfig bool -} - -func InstallRunnerScript(installParams InstallRunnerParams, osType params.OSType, tpl string) ([]byte, error) { - if tpl == "" { - switch osType { - case params.Linux: - tpl = CloudConfigTemplate - case params.Windows: - tpl = WindowsSetupScriptTemplate - default: - return nil, fmt.Errorf("unsupported os type: %s", osType) - } - } - - t, err := template.New("").Parse(tpl) - if err != nil { - return nil, errors.Wrap(err, "parsing template") - } - - var buf bytes.Buffer - if err := t.Execute(&buf, installParams); err != nil { - return nil, errors.Wrap(err, "rendering template") - } - - return buf.Bytes(), nil -} diff --git a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/util.go b/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/util.go deleted file mode 100644 index a709dbef..00000000 --- a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/util.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2023 Cloudbase Solutions SRL -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package cloudconfig - -import ( - "encoding/json" - "fmt" - "sort" - "strings" - - "github.com/cloudbase/garm-provider-common/defaults" - "github.com/cloudbase/garm-provider-common/params" - "github.com/pkg/errors" -) - -// CloudConfigSpec is a struct that holds extra specs that can be used to customize user data. 
-type CloudConfigSpec struct { - // RunnerInstallTemplate can be used to override the default runner install template. - // If used, the caller is responsible for the correctness of the template as well as the - // suitability of the template for the target OS. - RunnerInstallTemplate []byte `json:"runner_install_template"` - // PreInstallScripts is a map of pre-install scripts that will be run before the - // runner install script. These will run as root and can be used to prep a generic image - // before we attempt to install the runner. The key of the map is the name of the script - // as it will be written to disk. The value is a byte array with the contents of the script. - // - // These scripts will be added and run in alphabetical order. - // - // On Linux, we will set the executable flag. On Windows, the name matters as Windows looks for an - // extension to determine if the file is an executable or not. In theory this can hold binaries, - // but in most cases this will most likely hold scripts. We do not currenly validate the payload, - // so it's up to the user what they upload here. - // Caution needs to be exercised when using this feature, as the total size of userdata is limited - // on most providers. - PreInstallScripts map[string][]byte `json:"pre_install_scripts"` - // ExtraContext is a map of extra context that will be passed to the runner install template. - ExtraContext map[string]string `json:"extra_context"` -} - -func sortMapKeys(m map[string][]byte) []string { - var keys []string - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - return keys -} - -// GetSpecs returns the cloud config specific extra specs from the bootstrap params. 
-func GetSpecs(bootstrapParams params.BootstrapInstance) (CloudConfigSpec, error) { - var extraSpecs CloudConfigSpec - if len(bootstrapParams.ExtraSpecs) == 0 { - return extraSpecs, nil - } - - if err := json.Unmarshal(bootstrapParams.ExtraSpecs, &extraSpecs); err != nil { - return CloudConfigSpec{}, errors.Wrap(err, "unmarshaling extra specs") - } - - if extraSpecs.ExtraContext == nil { - extraSpecs.ExtraContext = map[string]string{} - } - - if extraSpecs.PreInstallScripts == nil { - extraSpecs.PreInstallScripts = map[string][]byte{} - } - - return extraSpecs, nil -} - -// GetRunnerInstallScript returns the runner install script for the given bootstrap params. -// This function will return either the default script for the given OS type or will use the supplied template -// if one is provided. -func GetRunnerInstallScript(bootstrapParams params.BootstrapInstance, tools params.RunnerApplicationDownload, runnerName string) ([]byte, error) { - if tools.GetFilename() == "" { - return nil, fmt.Errorf("missing tools filename") - } - - if tools.GetDownloadURL() == "" { - return nil, fmt.Errorf("missing tools download URL") - } - - tempToken := tools.GetTempDownloadToken() - extraSpecs, err := GetSpecs(bootstrapParams) - if err != nil { - return nil, errors.Wrap(err, "getting specs") - } - - installRunnerParams := InstallRunnerParams{ - FileName: tools.GetFilename(), - DownloadURL: tools.GetDownloadURL(), - TempDownloadToken: tempToken, - MetadataURL: bootstrapParams.MetadataURL, - RunnerUsername: defaults.DefaultUser, - RunnerGroup: defaults.DefaultUser, - RepoURL: bootstrapParams.RepoURL, - RunnerName: runnerName, - RunnerLabels: strings.Join(bootstrapParams.Labels, ","), - CallbackURL: bootstrapParams.CallbackURL, - CallbackToken: bootstrapParams.InstanceToken, - GitHubRunnerGroup: bootstrapParams.GitHubRunnerGroup, - ExtraContext: extraSpecs.ExtraContext, - EnableBootDebug: bootstrapParams.UserDataOptions.EnableBootDebug, - UseJITConfig: 
bootstrapParams.JitConfigEnabled, - } - - if bootstrapParams.CACertBundle != nil && len(bootstrapParams.CACertBundle) > 0 { - installRunnerParams.CABundle = string(bootstrapParams.CACertBundle) - } - - installScript, err := InstallRunnerScript(installRunnerParams, bootstrapParams.OSType, string(extraSpecs.RunnerInstallTemplate)) - if err != nil { - return nil, errors.Wrap(err, "generating script") - } - - return installScript, nil -} - -// GetCloudInitConfig returns the cloud-init specific userdata config. This config can be used on most clouds -// for most Linux machines. The install runner script must be generated separately either by GetRunnerInstallScript() -// or some other means. -func GetCloudInitConfig(bootstrapParams params.BootstrapInstance, installScript []byte) (string, error) { - extraSpecs, err := GetSpecs(bootstrapParams) - if err != nil { - return "", errors.Wrap(err, "getting specs") - } - - cloudCfg := NewDefaultCloudInitConfig() - - if bootstrapParams.UserDataOptions.DisableUpdatesOnBoot { - cloudCfg.PackageUpgrade = false - cloudCfg.Packages = []string{} - } - for _, pkg := range bootstrapParams.UserDataOptions.ExtraPackages { - cloudCfg.AddPackage(pkg) - } - - if len(extraSpecs.PreInstallScripts) > 0 { - names := sortMapKeys(extraSpecs.PreInstallScripts) - for _, name := range names { - script := extraSpecs.PreInstallScripts[name] - cloudCfg.AddFile(script, fmt.Sprintf("/garm-pre-install/%s", name), "root:root", "755") - cloudCfg.AddRunCmd(fmt.Sprintf("/garm-pre-install/%s", name)) - } - } - cloudCfg.AddRunCmd("rm -rf /garm-pre-install") - - cloudCfg.AddSSHKey(bootstrapParams.SSHKeys...) 
- cloudCfg.AddFile(installScript, "/install_runner.sh", "root:root", "755") - cloudCfg.AddRunCmd(fmt.Sprintf("su -l -c /install_runner.sh %s", defaults.DefaultUser)) - cloudCfg.AddRunCmd("rm -f /install_runner.sh") - if bootstrapParams.CACertBundle != nil && len(bootstrapParams.CACertBundle) > 0 { - if err := cloudCfg.AddCACert(bootstrapParams.CACertBundle); err != nil { - return "", errors.Wrap(err, "adding CA cert bundle") - } - } - - asStr, err := cloudCfg.Serialize() - if err != nil { - return "", errors.Wrap(err, "creating cloud config") - } - - return asStr, nil -} - -// GetCloudConfig is a helper function that generates a cloud-init config for Linux and a powershell script for Windows. -// In most cases this function should do, but in situations where a more custom approach is needed, you may need to call -// GetCloudInitConfig() or GetRunnerInstallScript() directly and compose the final userdata in a different way. -// The extra specs PreInstallScripts is only supported on Linux via cloud-init by this function. On some providers, like Azure -// Windows initialization scripts are run by creating a separate CustomScriptExtension resource for each individual script. -// On other clouds it may be different. This function aims to be generic, which is why it only supports the PreInstallScripts -// via cloud-init. 
-func GetCloudConfig(bootstrapParams params.BootstrapInstance, tools params.RunnerApplicationDownload, runnerName string) (string, error) { - installScript, err := GetRunnerInstallScript(bootstrapParams, tools, runnerName) - if err != nil { - return "", errors.Wrap(err, "generating script") - } - - var asStr string - switch bootstrapParams.OSType { - case params.Linux: - cloudCfg, err := GetCloudInitConfig(bootstrapParams, installScript) - if err != nil { - return "", errors.Wrap(err, "getting cloud init config") - } - return cloudCfg, nil - case params.Windows: - asStr = string(installScript) - default: - return "", fmt.Errorf("unknown os type: %s", bootstrapParams.OSType) - } - - return asStr, nil -} diff --git a/vendor/github.com/flosch/pongo2/.gitattributes b/vendor/github.com/flosch/pongo2/.gitattributes deleted file mode 100644 index fcadb2cf..00000000 --- a/vendor/github.com/flosch/pongo2/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -* text eol=lf diff --git a/vendor/github.com/flosch/pongo2/.gitignore b/vendor/github.com/flosch/pongo2/.gitignore deleted file mode 100644 index 1346be55..00000000 --- a/vendor/github.com/flosch/pongo2/.gitignore +++ /dev/null @@ -1,41 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -.idea -.vscode - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -.project -EBNF.txt -test1.tpl -pongo2_internal_test.go -tpl-error.out -/count.out -/cover.out -*.swp -*.iml -/cpu.out -/mem.out -/pongo2.test -*.error -/profile -/coverage.out -/pongo2_internal_test.ignore diff --git a/vendor/github.com/flosch/pongo2/.travis.yml b/vendor/github.com/flosch/pongo2/.travis.yml deleted file mode 100644 index e39e5d05..00000000 --- a/vendor/github.com/flosch/pongo2/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -os: - - linux - - osx -go: - - 1.12 -script: - - go 
test -v diff --git a/vendor/github.com/flosch/pongo2/AUTHORS b/vendor/github.com/flosch/pongo2/AUTHORS deleted file mode 100644 index 601697cf..00000000 --- a/vendor/github.com/flosch/pongo2/AUTHORS +++ /dev/null @@ -1,11 +0,0 @@ -Main author and maintainer of pongo2: - -* Florian Schlachter - -Contributors (in no specific order): - -* @romanoaugusto88 -* @vitalbh -* @blaubaer - -Feel free to add yourself to the list or to modify your entry if you did a contribution. diff --git a/vendor/github.com/flosch/pongo2/LICENSE b/vendor/github.com/flosch/pongo2/LICENSE deleted file mode 100644 index e876f869..00000000 --- a/vendor/github.com/flosch/pongo2/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013-2014 Florian Schlachter - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/flosch/pongo2/README.md b/vendor/github.com/flosch/pongo2/README.md deleted file mode 100644 index e59694e2..00000000 --- a/vendor/github.com/flosch/pongo2/README.md +++ /dev/null @@ -1,167 +0,0 @@ -# [pongo](https://en.wikipedia.org/wiki/Pongo_%28genus%29)2 - -[![PkgGoDev](https://pkg.go.dev/badge/flosch/pongo2)](https://pkg.go.dev/flosch/pongo2) -[![Build Status](https://travis-ci.org/flosch/pongo2.svg?branch=master)](https://travis-ci.org/flosch/pongo2) - -pongo2 is a Django-syntax like templating-language. - -Install/update using `go get` (no dependencies required by pongo2): - -```sh -go get -u github.com/flosch/pongo2 -``` - -Please use the [issue tracker](https://github.com/flosch/pongo2/issues) if you're encountering any problems with pongo2 or if you need help with implementing tags or filters ([create a ticket!](https://github.com/flosch/pongo2/issues/new)). - -## First impression of a template - -```django - - - Our admins and users - - {# This is a short example to give you a quick overview of pongo2's syntax. #} - {% macro user_details(user, is_admin=false) %} -
- -

- = 40) || (user.karma > calc_avg_karma(userlist)+5) %} class="karma-good"{% - endif %}> - - - {{ user }} -

- - -

This user registered {{ user.register_date|naturaltime }}.

- - -

The user's biography:

-

- {{ user.biography|markdown|truncatewords_html:15 }} - read more -

- - {% if is_admin %} -

This user is an admin!

- {% endif %} -
- {% endmacro %} - - - - -

Our admins

- {% for admin in adminlist %} {{ user_details(admin, true) }} {% endfor %} - -

Our members

- {% for user in userlist %} {{ user_details(user) }} {% endfor %} - - -``` - -## Features - -- Syntax- and feature-set-compatible with [Django 1.7](https://django.readthedocs.io/en/1.7.x/topics/templates.html) -- [Advanced C-like expressions](https://github.com/flosch/pongo2/blob/master/template_tests/expressions.tpl). -- [Complex function calls within expressions](https://github.com/flosch/pongo2/blob/master/template_tests/function_calls_wrapper.tpl). -- [Easy API to create new filters and tags](http://godoc.org/github.com/flosch/pongo2#RegisterFilter) ([including parsing arguments](http://godoc.org/github.com/flosch/pongo2#Parser)) -- Additional features: - - Macros including importing macros from other files (see [template_tests/macro.tpl](https://github.com/flosch/pongo2/blob/master/template_tests/macro.tpl)) - - [Template sandboxing](https://godoc.org/github.com/flosch/pongo2#TemplateSet) ([directory patterns](http://golang.org/pkg/path/filepath/#Match), banned tags/filters) - -## Caveats - -### Filters - -- **date** / **time**: The `date` and `time` filter are taking the Golang specific time- and date-format (not Django's one) currently. [Take a look on the format here](http://golang.org/pkg/time/#Time.Format). -- **stringformat**: `stringformat` does **not** take Python's string format syntax as a parameter, instead it takes Go's. Essentially `{{ 3.14|stringformat:"pi is %.2f" }}` is `fmt.Sprintf("pi is %.2f", 3.14)`. -- **escape** / **force_escape**: Unlike Django's behaviour, the `escape`-filter is applied immediately. Therefore there is no need for a `force_escape`-filter yet. - -### Tags - -- **for**: All the `forloop` fields (like `forloop.counter`) are written with a capital letter at the beginning. For example, the `counter` can be accessed by `forloop.Counter` and the parentloop by `forloop.Parentloop`. -- **now**: takes Go's time format (see **date** and **time**-filter). 
- -### Misc - -- **not in-operator**: You can check whether a map/struct/string contains a key/field/substring by using the in-operator (or the negation of it): - `{% if key in map %}Key is in map{% else %}Key not in map{% endif %}` or `{% if !(key in map) %}Key is NOT in map{% else %}Key is in map{% endif %}`. - -## Add-ons, libraries and helpers - -### Official - -- [pongo2-addons](https://github.com/flosch/pongo2-addons) - Official additional filters/tags for pongo2 (for example a **markdown**-filter). They are in their own repository because they're relying on 3rd-party-libraries. - -### 3rd-party - -- [beego-pongo2](https://github.com/oal/beego-pongo2) - A tiny little helper for using Pongo2 with [Beego](https://github.com/astaxie/beego). -- [beego-pongo2.v2](https://github.com/ipfans/beego-pongo2.v2) - Same as `beego-pongo2`, but for pongo2 v2. -- [macaron-pongo2](https://github.com/macaron-contrib/pongo2) - pongo2 support for [Macaron](https://github.com/Unknwon/macaron), a modular web framework. -- [ginpongo2](https://github.com/ngerakines/ginpongo2) - middleware for [gin](github.com/gin-gonic/gin) to use pongo2 templates -- [Build'n support for Iris' template engine](https://github.com/kataras/iris) -- [pongo2gin](https://gitlab.com/go-box/pongo2gin) - alternative renderer for [gin](github.com/gin-gonic/gin) to use pongo2 templates -- [pongo2-trans](https://github.com/digitalcrab/pongo2trans) - `trans`-tag implementation for internationalization -- [tpongo2](https://github.com/tango-contrib/tpongo2) - pongo2 support for [Tango](https://github.com/lunny/tango), a micro-kernel & pluggable web framework. -- [p2cli](https://github.com/wrouesnel/p2cli) - command line templating utility based on pongo2 - -Please add your project to this list and send me a pull request when you've developed something nice for pongo2. - -## Who's using pongo2 - -[I'm compiling a list of pongo2 users](https://github.com/flosch/pongo2/issues/241). Add your project or company! 
- -## API-usage examples - -Please see the documentation for a full list of provided API methods. - -### A tiny example (template string) - -```go -// Compile the template first (i. e. creating the AST) -tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!") -if err != nil { - panic(err) -} -// Now you can render the template with the given -// pongo2.Context how often you want to. -out, err := tpl.Execute(pongo2.Context{"name": "florian"}) -if err != nil { - panic(err) -} -fmt.Println(out) // Output: Hello Florian! -``` - -## Example server-usage (template file) - -```go -package main - -import ( - "github.com/flosch/pongo2" - "net/http" -) - -// Pre-compiling the templates at application startup using the -// little Must()-helper function (Must() will panic if FromFile() -// or FromString() will return with an error - that's it). -// It's faster to pre-compile it anywhere at startup and only -// execute the template later. -var tplExample = pongo2.Must(pongo2.FromFile("example.html")) - -func examplePage(w http.ResponseWriter, r *http.Request) { - // Execute the template per HTTP request - err := tplExample.ExecuteWriter(pongo2.Context{"query": r.FormValue("query")}, w) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} - -func main() { - http.HandleFunc("/", examplePage) - http.ListenAndServe(":8080", nil) -} -``` diff --git a/vendor/github.com/flosch/pongo2/context.go b/vendor/github.com/flosch/pongo2/context.go deleted file mode 100644 index dbc5e3e3..00000000 --- a/vendor/github.com/flosch/pongo2/context.go +++ /dev/null @@ -1,137 +0,0 @@ -package pongo2 - -import ( - "fmt" - "regexp" - - "errors" -) - -var reIdentifiers = regexp.MustCompile("^[a-zA-Z0-9_]+$") - -var autoescape = true - -func SetAutoescape(newValue bool) { - autoescape = newValue -} - -// A Context type provides constants, variables, instances or functions to a template. 
-// -// pongo2 automatically provides meta-information or functions through the "pongo2"-key. -// Currently, context["pongo2"] contains the following keys: -// 1. version: returns the version string -// -// Template examples for accessing items from your context: -// {{ myconstant }} -// {{ myfunc("test", 42) }} -// {{ user.name }} -// {{ pongo2.version }} -type Context map[string]interface{} - -func (c Context) checkForValidIdentifiers() *Error { - for k, v := range c { - if !reIdentifiers.MatchString(k) { - return &Error{ - Sender: "checkForValidIdentifiers", - OrigError: fmt.Errorf("context-key '%s' (value: '%+v') is not a valid identifier", k, v), - } - } - } - return nil -} - -// Update updates this context with the key/value-pairs from another context. -func (c Context) Update(other Context) Context { - for k, v := range other { - c[k] = v - } - return c -} - -// ExecutionContext contains all data important for the current rendering state. -// -// If you're writing a custom tag, your tag's Execute()-function will -// have access to the ExecutionContext. This struct stores anything -// about the current rendering process's Context including -// the Context provided by the user (field Public). -// You can safely use the Private context to provide data to the user's -// template (like a 'forloop'-information). The Shared-context is used -// to share data between tags. All ExecutionContexts share this context. -// -// Please be careful when accessing the Public data. -// PLEASE DO NOT MODIFY THE PUBLIC CONTEXT (read-only). -// -// To create your own execution context within tags, use the -// NewChildExecutionContext(parent) function. 
-type ExecutionContext struct { - template *Template - - Autoescape bool - Public Context - Private Context - Shared Context -} - -var pongo2MetaContext = Context{ - "version": Version, -} - -func newExecutionContext(tpl *Template, ctx Context) *ExecutionContext { - privateCtx := make(Context) - - // Make the pongo2-related funcs/vars available to the context - privateCtx["pongo2"] = pongo2MetaContext - - return &ExecutionContext{ - template: tpl, - - Public: ctx, - Private: privateCtx, - Autoescape: autoescape, - } -} - -func NewChildExecutionContext(parent *ExecutionContext) *ExecutionContext { - newctx := &ExecutionContext{ - template: parent.template, - - Public: parent.Public, - Private: make(Context), - Autoescape: parent.Autoescape, - } - newctx.Shared = parent.Shared - - // Copy all existing private items - newctx.Private.Update(parent.Private) - - return newctx -} - -func (ctx *ExecutionContext) Error(msg string, token *Token) *Error { - return ctx.OrigError(errors.New(msg), token) -} - -func (ctx *ExecutionContext) OrigError(err error, token *Token) *Error { - filename := ctx.template.name - var line, col int - if token != nil { - // No tokens available - // TODO: Add location (from where?) - filename = token.Filename - line = token.Line - col = token.Col - } - return &Error{ - Template: ctx.template, - Filename: filename, - Line: line, - Column: col, - Token: token, - Sender: "execution", - OrigError: err, - } -} - -func (ctx *ExecutionContext) Logf(format string, args ...interface{}) { - ctx.template.set.logf(format, args...) 
-} diff --git a/vendor/github.com/flosch/pongo2/doc.go b/vendor/github.com/flosch/pongo2/doc.go deleted file mode 100644 index 5a23e2b2..00000000 --- a/vendor/github.com/flosch/pongo2/doc.go +++ /dev/null @@ -1,31 +0,0 @@ -// A Django-syntax like template-engine -// -// Blog posts about pongo2 (including introduction and migration): -// https://www.florian-schlachter.de/?tag=pongo2 -// -// Complete documentation on the template language: -// https://docs.djangoproject.com/en/dev/topics/templates/ -// -// Try out pongo2 live in the pongo2 playground: -// https://www.florian-schlachter.de/pongo2/ -// -// Make sure to read README.md in the repository as well. -// -// A tiny example with template strings: -// -// (Snippet on playground: https://www.florian-schlachter.de/pongo2/?id=1206546277) -// -// // Compile the template first (i. e. creating the AST) -// tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!") -// if err != nil { -// panic(err) -// } -// // Now you can render the template with the given -// // pongo2.Context how often you want to. -// out, err := tpl.Execute(pongo2.Context{"name": "fred"}) -// if err != nil { -// panic(err) -// } -// fmt.Println(out) // Output: Hello Fred! -// -package pongo2 diff --git a/vendor/github.com/flosch/pongo2/error.go b/vendor/github.com/flosch/pongo2/error.go deleted file mode 100644 index 8aec8c10..00000000 --- a/vendor/github.com/flosch/pongo2/error.go +++ /dev/null @@ -1,91 +0,0 @@ -package pongo2 - -import ( - "bufio" - "fmt" - "os" -) - -// The Error type is being used to address an error during lexing, parsing or -// execution. If you want to return an error object (for example in your own -// tag or filter) fill this object with as much information as you have. -// Make sure "Sender" is always given (if you're returning an error within -// a filter, make Sender equals 'filter:yourfilter'; same goes for tags: 'tag:mytag'). -// It's okay if you only fill in ErrorMsg if you don't have any other details at hand. 
-type Error struct { - Template *Template - Filename string - Line int - Column int - Token *Token - Sender string - OrigError error -} - -func (e *Error) updateFromTokenIfNeeded(template *Template, t *Token) *Error { - if e.Template == nil { - e.Template = template - } - - if e.Token == nil { - e.Token = t - if e.Line <= 0 { - e.Line = t.Line - e.Column = t.Col - } - } - - return e -} - -// Returns a nice formatted error string. -func (e *Error) Error() string { - s := "[Error" - if e.Sender != "" { - s += " (where: " + e.Sender + ")" - } - if e.Filename != "" { - s += " in " + e.Filename - } - if e.Line > 0 { - s += fmt.Sprintf(" | Line %d Col %d", e.Line, e.Column) - if e.Token != nil { - s += fmt.Sprintf(" near '%s'", e.Token.Val) - } - } - s += "] " - s += e.OrigError.Error() - return s -} - -// RawLine returns the affected line from the original template, if available. -func (e *Error) RawLine() (line string, available bool, outErr error) { - if e.Line <= 0 || e.Filename == "" { - return "", false, nil - } - - filename := e.Filename - if e.Template != nil { - filename = e.Template.set.resolveFilename(e.Template, e.Filename) - } - file, err := os.Open(filename) - if err != nil { - return "", false, err - } - defer func() { - err := file.Close() - if err != nil && outErr == nil { - outErr = err - } - }() - - scanner := bufio.NewScanner(file) - l := 0 - for scanner.Scan() { - l++ - if l == e.Line { - return scanner.Text(), true, nil - } - } - return "", false, nil -} diff --git a/vendor/github.com/flosch/pongo2/filters.go b/vendor/github.com/flosch/pongo2/filters.go deleted file mode 100644 index 8d4c89e2..00000000 --- a/vendor/github.com/flosch/pongo2/filters.go +++ /dev/null @@ -1,141 +0,0 @@ -package pongo2 - -import ( - "fmt" -) - -// FilterFunction is the type filter functions must fulfil -type FilterFunction func(in *Value, param *Value) (out *Value, err *Error) - -var filters map[string]FilterFunction - -func init() { - filters = 
make(map[string]FilterFunction) -} - -// FilterExists returns true if the given filter is already registered -func FilterExists(name string) bool { - _, existing := filters[name] - return existing -} - -// RegisterFilter registers a new filter. If there's already a filter with the same -// name, RegisterFilter will panic. You usually want to call this -// function in the filter's init() function: -// http://golang.org/doc/effective_go.html#init -// -// See http://www.florian-schlachter.de/post/pongo2/ for more about -// writing filters and tags. -func RegisterFilter(name string, fn FilterFunction) error { - if FilterExists(name) { - return fmt.Errorf("filter with name '%s' is already registered", name) - } - filters[name] = fn - return nil -} - -// ReplaceFilter replaces an already registered filter with a new implementation. Use this -// function with caution since it allows you to change existing filter behaviour. -func ReplaceFilter(name string, fn FilterFunction) error { - if !FilterExists(name) { - return fmt.Errorf("filter with name '%s' does not exist (therefore cannot be overridden)", name) - } - filters[name] = fn - return nil -} - -// MustApplyFilter behaves like ApplyFilter, but panics on an error. -func MustApplyFilter(name string, value *Value, param *Value) *Value { - val, err := ApplyFilter(name, value, param) - if err != nil { - panic(err) - } - return val -} - -// ApplyFilter applies a filter to a given value using the given parameters. -// Returns a *pongo2.Value or an error. 
-func ApplyFilter(name string, value *Value, param *Value) (*Value, *Error) { - fn, existing := filters[name] - if !existing { - return nil, &Error{ - Sender: "applyfilter", - OrigError: fmt.Errorf("Filter with name '%s' not found.", name), - } - } - - // Make sure param is a *Value - if param == nil { - param = AsValue(nil) - } - - return fn(value, param) -} - -type filterCall struct { - token *Token - - name string - parameter IEvaluator - - filterFunc FilterFunction -} - -func (fc *filterCall) Execute(v *Value, ctx *ExecutionContext) (*Value, *Error) { - var param *Value - var err *Error - - if fc.parameter != nil { - param, err = fc.parameter.Evaluate(ctx) - if err != nil { - return nil, err - } - } else { - param = AsValue(nil) - } - - filteredValue, err := fc.filterFunc(v, param) - if err != nil { - return nil, err.updateFromTokenIfNeeded(ctx.template, fc.token) - } - return filteredValue, nil -} - -// Filter = IDENT | IDENT ":" FilterArg | IDENT "|" Filter -func (p *Parser) parseFilter() (*filterCall, *Error) { - identToken := p.MatchType(TokenIdentifier) - - // Check filter ident - if identToken == nil { - return nil, p.Error("Filter name must be an identifier.", nil) - } - - filter := &filterCall{ - token: identToken, - name: identToken.Val, - } - - // Get the appropriate filter function and bind it - filterFn, exists := filters[identToken.Val] - if !exists { - return nil, p.Error(fmt.Sprintf("Filter '%s' does not exist.", identToken.Val), identToken) - } - - filter.filterFunc = filterFn - - // Check for filter-argument (2 tokens needed: ':' ARG) - if p.Match(TokenSymbol, ":") != nil { - if p.Peek(TokenSymbol, "}}") != nil { - return nil, p.Error("Filter parameter required after ':'.", nil) - } - - // Get filter argument expression - v, err := p.parseVariableOrLiteral() - if err != nil { - return nil, err - } - filter.parameter = v - } - - return filter, nil -} diff --git a/vendor/github.com/flosch/pongo2/filters_builtin.go 
b/vendor/github.com/flosch/pongo2/filters_builtin.go deleted file mode 100644 index c0ec6161..00000000 --- a/vendor/github.com/flosch/pongo2/filters_builtin.go +++ /dev/null @@ -1,927 +0,0 @@ -package pongo2 - -/* Filters that are provided through github.com/flosch/pongo2-addons: - ------------------------------------------------------------------ - - filesizeformat - slugify - timesince - timeuntil - - Filters that won't be added: - ---------------------------- - - get_static_prefix (reason: web-framework specific) - pprint (reason: python-specific) - static (reason: web-framework specific) - - Reconsideration (not implemented yet): - -------------------------------------- - - force_escape (reason: not yet needed since this is the behaviour of pongo2's escape filter) - safeseq (reason: same reason as `force_escape`) - unordered_list (python-specific; not sure whether needed or not) - dictsort (python-specific; maybe one could add a filter to sort a list of structs by a specific field name) - dictsortreversed (see dictsort) -*/ - -import ( - "bytes" - "fmt" - "math/rand" - "net/url" - "regexp" - "strconv" - "strings" - "time" - "unicode/utf8" - - "errors" -) - -func init() { - rand.Seed(time.Now().Unix()) - - RegisterFilter("escape", filterEscape) - RegisterFilter("safe", filterSafe) - RegisterFilter("escapejs", filterEscapejs) - - RegisterFilter("add", filterAdd) - RegisterFilter("addslashes", filterAddslashes) - RegisterFilter("capfirst", filterCapfirst) - RegisterFilter("center", filterCenter) - RegisterFilter("cut", filterCut) - RegisterFilter("date", filterDate) - RegisterFilter("default", filterDefault) - RegisterFilter("default_if_none", filterDefaultIfNone) - RegisterFilter("divisibleby", filterDivisibleby) - RegisterFilter("first", filterFirst) - RegisterFilter("floatformat", filterFloatformat) - RegisterFilter("get_digit", filterGetdigit) - RegisterFilter("iriencode", filterIriencode) - RegisterFilter("join", filterJoin) - RegisterFilter("last", 
filterLast) - RegisterFilter("length", filterLength) - RegisterFilter("length_is", filterLengthis) - RegisterFilter("linebreaks", filterLinebreaks) - RegisterFilter("linebreaksbr", filterLinebreaksbr) - RegisterFilter("linenumbers", filterLinenumbers) - RegisterFilter("ljust", filterLjust) - RegisterFilter("lower", filterLower) - RegisterFilter("make_list", filterMakelist) - RegisterFilter("phone2numeric", filterPhone2numeric) - RegisterFilter("pluralize", filterPluralize) - RegisterFilter("random", filterRandom) - RegisterFilter("removetags", filterRemovetags) - RegisterFilter("rjust", filterRjust) - RegisterFilter("slice", filterSlice) - RegisterFilter("split", filterSplit) - RegisterFilter("stringformat", filterStringformat) - RegisterFilter("striptags", filterStriptags) - RegisterFilter("time", filterDate) // time uses filterDate (same golang-format) - RegisterFilter("title", filterTitle) - RegisterFilter("truncatechars", filterTruncatechars) - RegisterFilter("truncatechars_html", filterTruncatecharsHTML) - RegisterFilter("truncatewords", filterTruncatewords) - RegisterFilter("truncatewords_html", filterTruncatewordsHTML) - RegisterFilter("upper", filterUpper) - RegisterFilter("urlencode", filterUrlencode) - RegisterFilter("urlize", filterUrlize) - RegisterFilter("urlizetrunc", filterUrlizetrunc) - RegisterFilter("wordcount", filterWordcount) - RegisterFilter("wordwrap", filterWordwrap) - RegisterFilter("yesno", filterYesno) - - RegisterFilter("float", filterFloat) // pongo-specific - RegisterFilter("integer", filterInteger) // pongo-specific -} - -func filterTruncatecharsHelper(s string, newLen int) string { - runes := []rune(s) - if newLen < len(runes) { - if newLen >= 3 { - return fmt.Sprintf("%s...", string(runes[:newLen-3])) - } - // Not enough space for the ellipsis - return string(runes[:newLen]) - } - return string(runes) -} - -func filterTruncateHTMLHelper(value string, newOutput *bytes.Buffer, cond func() bool, fn func(c rune, s int, idx int) int, 
finalize func()) { - vLen := len(value) - var tagStack []string - idx := 0 - - for idx < vLen && !cond() { - c, s := utf8.DecodeRuneInString(value[idx:]) - if c == utf8.RuneError { - idx += s - continue - } - - if c == '<' { - newOutput.WriteRune(c) - idx += s // consume "<" - - if idx+1 < vLen { - if value[idx] == '/' { - // Close tag - - newOutput.WriteString("/") - - tag := "" - idx++ // consume "/" - - for idx < vLen { - c2, size2 := utf8.DecodeRuneInString(value[idx:]) - if c2 == utf8.RuneError { - idx += size2 - continue - } - - // End of tag found - if c2 == '>' { - idx++ // consume ">" - break - } - tag += string(c2) - idx += size2 - } - - if len(tagStack) > 0 { - // Ideally, the close tag is TOP of tag stack - // In malformed HTML, it must not be, so iterate through the stack and remove the tag - for i := len(tagStack) - 1; i >= 0; i-- { - if tagStack[i] == tag { - // Found the tag - tagStack[i] = tagStack[len(tagStack)-1] - tagStack = tagStack[:len(tagStack)-1] - break - } - } - } - - newOutput.WriteString(tag) - newOutput.WriteString(">") - } else { - // Open tag - - tag := "" - - params := false - for idx < vLen { - c2, size2 := utf8.DecodeRuneInString(value[idx:]) - if c2 == utf8.RuneError { - idx += size2 - continue - } - - newOutput.WriteRune(c2) - - // End of tag found - if c2 == '>' { - idx++ // consume ">" - break - } - - if !params { - if c2 == ' ' { - params = true - } else { - tag += string(c2) - } - } - - idx += size2 - } - - // Add tag to stack - tagStack = append(tagStack, tag) - } - } - } else { - idx = fn(c, s, idx) - } - } - - finalize() - - for i := len(tagStack) - 1; i >= 0; i-- { - tag := tagStack[i] - // Close everything from the regular tag stack - newOutput.WriteString(fmt.Sprintf("", tag)) - } -} - -func filterTruncatechars(in *Value, param *Value) (*Value, *Error) { - s := in.String() - newLen := param.Integer() - return AsValue(filterTruncatecharsHelper(s, newLen)), nil -} - -func filterTruncatecharsHTML(in *Value, param *Value) 
(*Value, *Error) { - value := in.String() - newLen := max(param.Integer()-3, 0) - - newOutput := bytes.NewBuffer(nil) - - textcounter := 0 - - filterTruncateHTMLHelper(value, newOutput, func() bool { - return textcounter >= newLen - }, func(c rune, s int, idx int) int { - textcounter++ - newOutput.WriteRune(c) - - return idx + s - }, func() { - if textcounter >= newLen && textcounter < len(value) { - newOutput.WriteString("...") - } - }) - - return AsSafeValue(newOutput.String()), nil -} - -func filterTruncatewords(in *Value, param *Value) (*Value, *Error) { - words := strings.Fields(in.String()) - n := param.Integer() - if n <= 0 { - return AsValue(""), nil - } - nlen := min(len(words), n) - out := make([]string, 0, nlen) - for i := 0; i < nlen; i++ { - out = append(out, words[i]) - } - - if n < len(words) { - out = append(out, "...") - } - - return AsValue(strings.Join(out, " ")), nil -} - -func filterTruncatewordsHTML(in *Value, param *Value) (*Value, *Error) { - value := in.String() - newLen := max(param.Integer(), 0) - - newOutput := bytes.NewBuffer(nil) - - wordcounter := 0 - - filterTruncateHTMLHelper(value, newOutput, func() bool { - return wordcounter >= newLen - }, func(_ rune, _ int, idx int) int { - // Get next word - wordFound := false - - for idx < len(value) { - c2, size2 := utf8.DecodeRuneInString(value[idx:]) - if c2 == utf8.RuneError { - idx += size2 - continue - } - - if c2 == '<' { - // HTML tag start, don't consume it - return idx - } - - newOutput.WriteRune(c2) - idx += size2 - - if c2 == ' ' || c2 == '.' 
|| c2 == ',' || c2 == ';' { - // Word ends here, stop capturing it now - break - } else { - wordFound = true - } - } - - if wordFound { - wordcounter++ - } - - return idx - }, func() { - if wordcounter >= newLen { - newOutput.WriteString("...") - } - }) - - return AsSafeValue(newOutput.String()), nil -} - -func filterEscape(in *Value, param *Value) (*Value, *Error) { - output := strings.Replace(in.String(), "&", "&", -1) - output = strings.Replace(output, ">", ">", -1) - output = strings.Replace(output, "<", "<", -1) - output = strings.Replace(output, "\"", """, -1) - output = strings.Replace(output, "'", "'", -1) - return AsValue(output), nil -} - -func filterSafe(in *Value, param *Value) (*Value, *Error) { - return in, nil // nothing to do here, just to keep track of the safe application -} - -func filterEscapejs(in *Value, param *Value) (*Value, *Error) { - sin := in.String() - - var b bytes.Buffer - - idx := 0 - for idx < len(sin) { - c, size := utf8.DecodeRuneInString(sin[idx:]) - if c == utf8.RuneError { - idx += size - continue - } - - if c == '\\' { - // Escape seq? 
- if idx+1 < len(sin) { - switch sin[idx+1] { - case 'r': - b.WriteString(fmt.Sprintf(`\u%04X`, '\r')) - idx += 2 - continue - case 'n': - b.WriteString(fmt.Sprintf(`\u%04X`, '\n')) - idx += 2 - continue - /*case '\'': - b.WriteString(fmt.Sprintf(`\u%04X`, '\'')) - idx += 2 - continue - case '"': - b.WriteString(fmt.Sprintf(`\u%04X`, '"')) - idx += 2 - continue*/ - } - } - } - - if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == ' ' || c == '/' { - b.WriteRune(c) - } else { - b.WriteString(fmt.Sprintf(`\u%04X`, c)) - } - - idx += size - } - - return AsValue(b.String()), nil -} - -func filterAdd(in *Value, param *Value) (*Value, *Error) { - if in.IsNumber() && param.IsNumber() { - if in.IsFloat() || param.IsFloat() { - return AsValue(in.Float() + param.Float()), nil - } - return AsValue(in.Integer() + param.Integer()), nil - } - // If in/param is not a number, we're relying on the - // Value's String() conversion and just add them both together - return AsValue(in.String() + param.String()), nil -} - -func filterAddslashes(in *Value, param *Value) (*Value, *Error) { - output := strings.Replace(in.String(), "\\", "\\\\", -1) - output = strings.Replace(output, "\"", "\\\"", -1) - output = strings.Replace(output, "'", "\\'", -1) - return AsValue(output), nil -} - -func filterCut(in *Value, param *Value) (*Value, *Error) { - return AsValue(strings.Replace(in.String(), param.String(), "", -1)), nil -} - -func filterLength(in *Value, param *Value) (*Value, *Error) { - return AsValue(in.Len()), nil -} - -func filterLengthis(in *Value, param *Value) (*Value, *Error) { - return AsValue(in.Len() == param.Integer()), nil -} - -func filterDefault(in *Value, param *Value) (*Value, *Error) { - if !in.IsTrue() { - return param, nil - } - return in, nil -} - -func filterDefaultIfNone(in *Value, param *Value) (*Value, *Error) { - if in.IsNil() { - return param, nil - } - return in, nil -} - -func filterDivisibleby(in *Value, param *Value) (*Value, *Error) { - if 
param.Integer() == 0 { - return AsValue(false), nil - } - return AsValue(in.Integer()%param.Integer() == 0), nil -} - -func filterFirst(in *Value, param *Value) (*Value, *Error) { - if in.CanSlice() && in.Len() > 0 { - return in.Index(0), nil - } - return AsValue(""), nil -} - -func filterFloatformat(in *Value, param *Value) (*Value, *Error) { - val := in.Float() - - decimals := -1 - if !param.IsNil() { - // Any argument provided? - decimals = param.Integer() - } - - // if the argument is not a number (e. g. empty), the default - // behaviour is trim the result - trim := !param.IsNumber() - - if decimals <= 0 { - // argument is negative or zero, so we - // want the output being trimmed - decimals = -decimals - trim = true - } - - if trim { - // Remove zeroes - if float64(int(val)) == val { - return AsValue(in.Integer()), nil - } - } - - return AsValue(strconv.FormatFloat(val, 'f', decimals, 64)), nil -} - -func filterGetdigit(in *Value, param *Value) (*Value, *Error) { - i := param.Integer() - l := len(in.String()) // do NOT use in.Len() here! 
- if i <= 0 || i > l { - return in, nil - } - return AsValue(in.String()[l-i] - 48), nil -} - -const filterIRIChars = "/#%[]=:;$&()+,!?*@'~" - -func filterIriencode(in *Value, param *Value) (*Value, *Error) { - var b bytes.Buffer - - sin := in.String() - for _, r := range sin { - if strings.IndexRune(filterIRIChars, r) >= 0 { - b.WriteRune(r) - } else { - b.WriteString(url.QueryEscape(string(r))) - } - } - - return AsValue(b.String()), nil -} - -func filterJoin(in *Value, param *Value) (*Value, *Error) { - if !in.CanSlice() { - return in, nil - } - sep := param.String() - sl := make([]string, 0, in.Len()) - for i := 0; i < in.Len(); i++ { - sl = append(sl, in.Index(i).String()) - } - return AsValue(strings.Join(sl, sep)), nil -} - -func filterLast(in *Value, param *Value) (*Value, *Error) { - if in.CanSlice() && in.Len() > 0 { - return in.Index(in.Len() - 1), nil - } - return AsValue(""), nil -} - -func filterUpper(in *Value, param *Value) (*Value, *Error) { - return AsValue(strings.ToUpper(in.String())), nil -} - -func filterLower(in *Value, param *Value) (*Value, *Error) { - return AsValue(strings.ToLower(in.String())), nil -} - -func filterMakelist(in *Value, param *Value) (*Value, *Error) { - s := in.String() - result := make([]string, 0, len(s)) - for _, c := range s { - result = append(result, string(c)) - } - return AsValue(result), nil -} - -func filterCapfirst(in *Value, param *Value) (*Value, *Error) { - if in.Len() <= 0 { - return AsValue(""), nil - } - t := in.String() - r, size := utf8.DecodeRuneInString(t) - return AsValue(strings.ToUpper(string(r)) + t[size:]), nil -} - -func filterCenter(in *Value, param *Value) (*Value, *Error) { - width := param.Integer() - slen := in.Len() - if width <= slen { - return in, nil - } - - spaces := width - slen - left := spaces/2 + spaces%2 - right := spaces / 2 - - return AsValue(fmt.Sprintf("%s%s%s", strings.Repeat(" ", left), - in.String(), strings.Repeat(" ", right))), nil -} - -func filterDate(in *Value, param 
*Value) (*Value, *Error) { - t, isTime := in.Interface().(time.Time) - if !isTime { - return nil, &Error{ - Sender: "filter:date", - OrigError: errors.New("filter input argument must be of type 'time.Time'"), - } - } - return AsValue(t.Format(param.String())), nil -} - -func filterFloat(in *Value, param *Value) (*Value, *Error) { - return AsValue(in.Float()), nil -} - -func filterInteger(in *Value, param *Value) (*Value, *Error) { - return AsValue(in.Integer()), nil -} - -func filterLinebreaks(in *Value, param *Value) (*Value, *Error) { - if in.Len() == 0 { - return in, nil - } - - var b bytes.Buffer - - // Newline =
- // Double newline =

...

- lines := strings.Split(in.String(), "\n") - lenlines := len(lines) - - opened := false - - for idx, line := range lines { - - if !opened { - b.WriteString("

") - opened = true - } - - b.WriteString(line) - - if idx < lenlines-1 && strings.TrimSpace(lines[idx]) != "" { - // We've not reached the end - if strings.TrimSpace(lines[idx+1]) == "" { - // Next line is empty - if opened { - b.WriteString("

") - opened = false - } - } else { - b.WriteString("
") - } - } - } - - if opened { - b.WriteString("

") - } - - return AsValue(b.String()), nil -} - -func filterSplit(in *Value, param *Value) (*Value, *Error) { - chunks := strings.Split(in.String(), param.String()) - - return AsValue(chunks), nil -} - -func filterLinebreaksbr(in *Value, param *Value) (*Value, *Error) { - return AsValue(strings.Replace(in.String(), "\n", "
", -1)), nil -} - -func filterLinenumbers(in *Value, param *Value) (*Value, *Error) { - lines := strings.Split(in.String(), "\n") - output := make([]string, 0, len(lines)) - for idx, line := range lines { - output = append(output, fmt.Sprintf("%d. %s", idx+1, line)) - } - return AsValue(strings.Join(output, "\n")), nil -} - -func filterLjust(in *Value, param *Value) (*Value, *Error) { - times := param.Integer() - in.Len() - if times < 0 { - times = 0 - } - return AsValue(fmt.Sprintf("%s%s", in.String(), strings.Repeat(" ", times))), nil -} - -func filterUrlencode(in *Value, param *Value) (*Value, *Error) { - return AsValue(url.QueryEscape(in.String())), nil -} - -// TODO: This regexp could do some work -var filterUrlizeURLRegexp = regexp.MustCompile(`((((http|https)://)|www\.|((^|[ ])[0-9A-Za-z_\-]+(\.com|\.net|\.org|\.info|\.biz|\.de))))(?U:.*)([ ]+|$)`) -var filterUrlizeEmailRegexp = regexp.MustCompile(`(\w+@\w+\.\w{2,4})`) - -func filterUrlizeHelper(input string, autoescape bool, trunc int) (string, error) { - var soutErr error - sout := filterUrlizeURLRegexp.ReplaceAllStringFunc(input, func(raw_url string) string { - var prefix string - var suffix string - if strings.HasPrefix(raw_url, " ") { - prefix = " " - } - if strings.HasSuffix(raw_url, " ") { - suffix = " " - } - - raw_url = strings.TrimSpace(raw_url) - - t, err := ApplyFilter("iriencode", AsValue(raw_url), nil) - if err != nil { - soutErr = err - return "" - } - url := t.String() - - if !strings.HasPrefix(url, "http") { - url = fmt.Sprintf("http://%s", url) - } - - title := raw_url - - if trunc > 3 && len(title) > trunc { - title = fmt.Sprintf("%s...", title[:trunc-3]) - } - - if autoescape { - t, err := ApplyFilter("escape", AsValue(title), nil) - if err != nil { - soutErr = err - return "" - } - title = t.String() - } - - return fmt.Sprintf(`%s%s%s`, prefix, url, title, suffix) - }) - if soutErr != nil { - return "", soutErr - } - - sout = filterUrlizeEmailRegexp.ReplaceAllStringFunc(sout, func(mail 
string) string { - title := mail - - if trunc > 3 && len(title) > trunc { - title = fmt.Sprintf("%s...", title[:trunc-3]) - } - - return fmt.Sprintf(`%s`, mail, title) - }) - - return sout, nil -} - -func filterUrlize(in *Value, param *Value) (*Value, *Error) { - autoescape := true - if param.IsBool() { - autoescape = param.Bool() - } - - s, err := filterUrlizeHelper(in.String(), autoescape, -1) - if err != nil { - - } - - return AsValue(s), nil -} - -func filterUrlizetrunc(in *Value, param *Value) (*Value, *Error) { - s, err := filterUrlizeHelper(in.String(), true, param.Integer()) - if err != nil { - return nil, &Error{ - Sender: "filter:urlizetrunc", - OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"), - } - } - return AsValue(s), nil -} - -func filterStringformat(in *Value, param *Value) (*Value, *Error) { - return AsValue(fmt.Sprintf(param.String(), in.Interface())), nil -} - -var reStriptags = regexp.MustCompile("<[^>]*?>") - -func filterStriptags(in *Value, param *Value) (*Value, *Error) { - s := in.String() - - // Strip all tags - s = reStriptags.ReplaceAllString(s, "") - - return AsValue(strings.TrimSpace(s)), nil -} - -// https://en.wikipedia.org/wiki/Phoneword -var filterPhone2numericMap = map[string]string{ - "a": "2", "b": "2", "c": "2", "d": "3", "e": "3", "f": "3", "g": "4", "h": "4", "i": "4", "j": "5", "k": "5", - "l": "5", "m": "6", "n": "6", "o": "6", "p": "7", "q": "7", "r": "7", "s": "7", "t": "8", "u": "8", "v": "8", - "w": "9", "x": "9", "y": "9", "z": "9", -} - -func filterPhone2numeric(in *Value, param *Value) (*Value, *Error) { - sin := in.String() - for k, v := range filterPhone2numericMap { - sin = strings.Replace(sin, k, v, -1) - sin = strings.Replace(sin, strings.ToUpper(k), v, -1) - } - return AsValue(sin), nil -} - -func filterPluralize(in *Value, param *Value) (*Value, *Error) { - if in.IsNumber() { - // Works only on numbers - if param.Len() > 0 { - endings := strings.Split(param.String(), ",") - 
if len(endings) > 2 { - return nil, &Error{ - Sender: "filter:pluralize", - OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"), - } - } - if len(endings) == 1 { - // 1 argument - if in.Integer() != 1 { - return AsValue(endings[0]), nil - } - } else { - if in.Integer() != 1 { - // 2 arguments - return AsValue(endings[1]), nil - } - return AsValue(endings[0]), nil - } - } else { - if in.Integer() != 1 { - // return default 's' - return AsValue("s"), nil - } - } - - return AsValue(""), nil - } - return nil, &Error{ - Sender: "filter:pluralize", - OrigError: errors.New("filter 'pluralize' does only work on numbers"), - } -} - -func filterRandom(in *Value, param *Value) (*Value, *Error) { - if !in.CanSlice() || in.Len() <= 0 { - return in, nil - } - i := rand.Intn(in.Len()) - return in.Index(i), nil -} - -func filterRemovetags(in *Value, param *Value) (*Value, *Error) { - s := in.String() - tags := strings.Split(param.String(), ",") - - // Strip only specific tags - for _, tag := range tags { - re := regexp.MustCompile(fmt.Sprintf("", tag)) - s = re.ReplaceAllString(s, "") - } - - return AsValue(strings.TrimSpace(s)), nil -} - -func filterRjust(in *Value, param *Value) (*Value, *Error) { - return AsValue(fmt.Sprintf(fmt.Sprintf("%%%ds", param.Integer()), in.String())), nil -} - -func filterSlice(in *Value, param *Value) (*Value, *Error) { - comp := strings.Split(param.String(), ":") - if len(comp) != 2 { - return nil, &Error{ - Sender: "filter:slice", - OrigError: errors.New("Slice string must have the format 'from:to' [from/to can be omitted, but the ':' is required]"), - } - } - - if !in.CanSlice() { - return in, nil - } - - from := AsValue(comp[0]).Integer() - to := in.Len() - - if from > to { - from = to - } - - vto := AsValue(comp[1]).Integer() - if vto >= from && vto <= in.Len() { - to = vto - } - - return in.Slice(from, to), nil -} - -func filterTitle(in *Value, param *Value) (*Value, *Error) { - if !in.IsString() { - return 
AsValue(""), nil - } - return AsValue(strings.Title(strings.ToLower(in.String()))), nil -} - -func filterWordcount(in *Value, param *Value) (*Value, *Error) { - return AsValue(len(strings.Fields(in.String()))), nil -} - -func filterWordwrap(in *Value, param *Value) (*Value, *Error) { - words := strings.Fields(in.String()) - wordsLen := len(words) - wrapAt := param.Integer() - if wrapAt <= 0 { - return in, nil - } - - linecount := wordsLen/wrapAt + wordsLen%wrapAt - lines := make([]string, 0, linecount) - for i := 0; i < linecount; i++ { - lines = append(lines, strings.Join(words[wrapAt*i:min(wrapAt*(i+1), wordsLen)], " ")) - } - return AsValue(strings.Join(lines, "\n")), nil -} - -func filterYesno(in *Value, param *Value) (*Value, *Error) { - choices := map[int]string{ - 0: "yes", - 1: "no", - 2: "maybe", - } - paramString := param.String() - customChoices := strings.Split(paramString, ",") - if len(paramString) > 0 { - if len(customChoices) > 3 { - return nil, &Error{ - Sender: "filter:yesno", - OrigError: fmt.Errorf("You cannot pass more than 3 options to the 'yesno'-filter (got: '%s').", paramString), - } - } - if len(customChoices) < 2 { - return nil, &Error{ - Sender: "filter:yesno", - OrigError: fmt.Errorf("You must pass either no or at least 2 arguments to the 'yesno'-filter (got: '%s').", paramString), - } - } - - // Map to the options now - choices[0] = customChoices[0] - choices[1] = customChoices[1] - if len(customChoices) == 3 { - choices[2] = customChoices[2] - } - } - - // maybe - if in.IsNil() { - return AsValue(choices[2]), nil - } - - // yes - if in.IsTrue() { - return AsValue(choices[0]), nil - } - - // no - return AsValue(choices[1]), nil -} diff --git a/vendor/github.com/flosch/pongo2/helpers.go b/vendor/github.com/flosch/pongo2/helpers.go deleted file mode 100644 index 880dbc04..00000000 --- a/vendor/github.com/flosch/pongo2/helpers.go +++ /dev/null @@ -1,15 +0,0 @@ -package pongo2 - -func max(a, b int) int { - if a > b { - return a - } - 
return b -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/flosch/pongo2/lexer.go b/vendor/github.com/flosch/pongo2/lexer.go deleted file mode 100644 index f1897984..00000000 --- a/vendor/github.com/flosch/pongo2/lexer.go +++ /dev/null @@ -1,432 +0,0 @@ -package pongo2 - -import ( - "fmt" - "strings" - "unicode/utf8" - - "errors" -) - -const ( - TokenError = iota - EOF - - TokenHTML - - TokenKeyword - TokenIdentifier - TokenString - TokenNumber - TokenSymbol -) - -var ( - tokenSpaceChars = " \n\r\t" - tokenIdentifierChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_" - tokenIdentifierCharsWithDigits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789" - tokenDigits = "0123456789" - - // Available symbols in pongo2 (within filters/tag) - TokenSymbols = []string{ - // 3-Char symbols - "{{-", "-}}", "{%-", "-%}", - - // 2-Char symbols - "==", ">=", "<=", "&&", "||", "{{", "}}", "{%", "%}", "!=", "<>", - - // 1-Char symbol - "(", ")", "+", "-", "*", "<", ">", "/", "^", ",", ".", "!", "|", ":", "=", "%", - } - - // Available keywords in pongo2 - TokenKeywords = []string{"in", "and", "or", "not", "true", "false", "as", "export"} -) - -type TokenType int -type Token struct { - Filename string - Typ TokenType - Val string - Line int - Col int - TrimWhitespaces bool -} - -type lexerStateFn func() lexerStateFn -type lexer struct { - name string - input string - start int // start pos of the item - pos int // current pos - width int // width of last rune - tokens []*Token - errored bool - startline int - startcol int - line int - col int - - inVerbatim bool - verbatimName string -} - -func (t *Token) String() string { - val := t.Val - if len(val) > 1000 { - val = fmt.Sprintf("%s...%s", val[:10], val[len(val)-5:]) - } - - typ := "" - switch t.Typ { - case TokenHTML: - typ = "HTML" - case TokenError: - typ = "Error" - case TokenIdentifier: - typ = "Identifier" - case TokenKeyword: - typ = "Keyword" 
- case TokenNumber: - typ = "Number" - case TokenString: - typ = "String" - case TokenSymbol: - typ = "Symbol" - default: - typ = "Unknown" - } - - return fmt.Sprintf("", - typ, t.Typ, val, t.Line, t.Col, t.TrimWhitespaces) -} - -func lex(name string, input string) ([]*Token, *Error) { - l := &lexer{ - name: name, - input: input, - tokens: make([]*Token, 0, 100), - line: 1, - col: 1, - startline: 1, - startcol: 1, - } - l.run() - if l.errored { - errtoken := l.tokens[len(l.tokens)-1] - return nil, &Error{ - Filename: name, - Line: errtoken.Line, - Column: errtoken.Col, - Sender: "lexer", - OrigError: errors.New(errtoken.Val), - } - } - return l.tokens, nil -} - -func (l *lexer) value() string { - return l.input[l.start:l.pos] -} - -func (l *lexer) length() int { - return l.pos - l.start -} - -func (l *lexer) emit(t TokenType) { - tok := &Token{ - Filename: l.name, - Typ: t, - Val: l.value(), - Line: l.startline, - Col: l.startcol, - } - - if t == TokenString { - // Escape sequence \" in strings - tok.Val = strings.Replace(tok.Val, `\"`, `"`, -1) - tok.Val = strings.Replace(tok.Val, `\\`, `\`, -1) - } - - if t == TokenSymbol && len(tok.Val) == 3 && (strings.HasSuffix(tok.Val, "-") || strings.HasPrefix(tok.Val, "-")) { - tok.TrimWhitespaces = true - tok.Val = strings.Replace(tok.Val, "-", "", -1) - } - - l.tokens = append(l.tokens, tok) - l.start = l.pos - l.startline = l.line - l.startcol = l.col -} - -func (l *lexer) next() rune { - if l.pos >= len(l.input) { - l.width = 0 - return EOF - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = w - l.pos += l.width - l.col += l.width - return r -} - -func (l *lexer) backup() { - l.pos -= l.width - l.col -= l.width -} - -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -func (l *lexer) ignore() { - l.start = l.pos - l.startline = l.line - l.startcol = l.col -} - -func (l *lexer) accept(what string) bool { - if strings.IndexRune(what, l.next()) >= 0 { - return true - } - l.backup() - 
return false -} - -func (l *lexer) acceptRun(what string) { - for strings.IndexRune(what, l.next()) >= 0 { - } - l.backup() -} - -func (l *lexer) errorf(format string, args ...interface{}) lexerStateFn { - t := &Token{ - Filename: l.name, - Typ: TokenError, - Val: fmt.Sprintf(format, args...), - Line: l.startline, - Col: l.startcol, - } - l.tokens = append(l.tokens, t) - l.errored = true - l.startline = l.line - l.startcol = l.col - return nil -} - -func (l *lexer) eof() bool { - return l.start >= len(l.input)-1 -} - -func (l *lexer) run() { - for { - // TODO: Support verbatim tag names - // https://docs.djangoproject.com/en/dev/ref/templates/builtins/#verbatim - if l.inVerbatim { - name := l.verbatimName - if name != "" { - name += " " - } - if strings.HasPrefix(l.input[l.pos:], fmt.Sprintf("{%% endverbatim %s%%}", name)) { // end verbatim - if l.pos > l.start { - l.emit(TokenHTML) - } - w := len("{% endverbatim %}") - l.pos += w - l.col += w - l.ignore() - l.inVerbatim = false - } - } else if strings.HasPrefix(l.input[l.pos:], "{% verbatim %}") { // tag - if l.pos > l.start { - l.emit(TokenHTML) - } - l.inVerbatim = true - w := len("{% verbatim %}") - l.pos += w - l.col += w - l.ignore() - } - - if !l.inVerbatim { - // Ignore single-line comments {# ... 
#} - if strings.HasPrefix(l.input[l.pos:], "{#") { - if l.pos > l.start { - l.emit(TokenHTML) - } - - l.pos += 2 // pass '{#' - l.col += 2 - - for { - switch l.peek() { - case EOF: - l.errorf("Single-line comment not closed.") - return - case '\n': - l.errorf("Newline not permitted in a single-line comment.") - return - } - - if strings.HasPrefix(l.input[l.pos:], "#}") { - l.pos += 2 // pass '#}' - l.col += 2 - break - } - - l.next() - } - l.ignore() // ignore whole comment - - // Comment skipped - continue // next token - } - - if strings.HasPrefix(l.input[l.pos:], "{{") || // variable - strings.HasPrefix(l.input[l.pos:], "{%") { // tag - if l.pos > l.start { - l.emit(TokenHTML) - } - l.tokenize() - if l.errored { - return - } - continue - } - } - - switch l.peek() { - case '\n': - l.line++ - l.col = 0 - } - if l.next() == EOF { - break - } - } - - if l.pos > l.start { - l.emit(TokenHTML) - } - - if l.inVerbatim { - l.errorf("verbatim-tag not closed, got EOF.") - } -} - -func (l *lexer) tokenize() { - for state := l.stateCode; state != nil; { - state = state() - } -} - -func (l *lexer) stateCode() lexerStateFn { -outer_loop: - for { - switch { - case l.accept(tokenSpaceChars): - if l.value() == "\n" { - return l.errorf("Newline not allowed within tag/variable.") - } - l.ignore() - continue - case l.accept(tokenIdentifierChars): - return l.stateIdentifier - case l.accept(tokenDigits): - return l.stateNumber - case l.accept(`"'`): - return l.stateString - } - - // Check for symbol - for _, sym := range TokenSymbols { - if strings.HasPrefix(l.input[l.start:], sym) { - l.pos += len(sym) - l.col += l.length() - l.emit(TokenSymbol) - - if sym == "%}" || sym == "-%}" || sym == "}}" || sym == "-}}" { - // Tag/variable end, return after emit - return nil - } - - continue outer_loop - } - } - - break - } - - // Normal shut down - return nil -} - -func (l *lexer) stateIdentifier() lexerStateFn { - l.acceptRun(tokenIdentifierChars) - 
l.acceptRun(tokenIdentifierCharsWithDigits) - for _, kw := range TokenKeywords { - if kw == l.value() { - l.emit(TokenKeyword) - return l.stateCode - } - } - l.emit(TokenIdentifier) - return l.stateCode -} - -func (l *lexer) stateNumber() lexerStateFn { - l.acceptRun(tokenDigits) - if l.accept(tokenIdentifierCharsWithDigits) { - // This seems to be an identifier starting with a number. - // See https://github.com/flosch/pongo2/issues/151 - return l.stateIdentifier() - } - /* - Maybe context-sensitive number lexing? - * comments.0.Text // first comment - * usercomments.1.0 // second user, first comment - * if (score >= 8.5) // 8.5 as a number - - if l.peek() == '.' { - l.accept(".") - if !l.accept(tokenDigits) { - return l.errorf("Malformed number.") - } - l.acceptRun(tokenDigits) - } - */ - l.emit(TokenNumber) - return l.stateCode -} - -func (l *lexer) stateString() lexerStateFn { - quotationMark := l.value() - l.ignore() - l.startcol-- // we're starting the position at the first " - for !l.accept(quotationMark) { - switch l.next() { - case '\\': - // escape sequence - switch l.peek() { - case '"', '\\': - l.next() - default: - return l.errorf("Unknown escape sequence: \\%c", l.peek()) - } - case EOF: - return l.errorf("Unexpected EOF, string not closed.") - case '\n': - return l.errorf("Newline in string is not allowed.") - } - } - l.backup() - l.emit(TokenString) - - l.next() - l.ignore() - - return l.stateCode -} diff --git a/vendor/github.com/flosch/pongo2/nodes.go b/vendor/github.com/flosch/pongo2/nodes.go deleted file mode 100644 index 5b039cdf..00000000 --- a/vendor/github.com/flosch/pongo2/nodes.go +++ /dev/null @@ -1,16 +0,0 @@ -package pongo2 - -// The root document -type nodeDocument struct { - Nodes []INode -} - -func (doc *nodeDocument) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - for _, n := range doc.Nodes { - err := n.Execute(ctx, writer) - if err != nil { - return err - } - } - return nil -} diff --git 
a/vendor/github.com/flosch/pongo2/nodes_html.go b/vendor/github.com/flosch/pongo2/nodes_html.go deleted file mode 100644 index b980a3a5..00000000 --- a/vendor/github.com/flosch/pongo2/nodes_html.go +++ /dev/null @@ -1,23 +0,0 @@ -package pongo2 - -import ( - "strings" -) - -type nodeHTML struct { - token *Token - trimLeft bool - trimRight bool -} - -func (n *nodeHTML) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - res := n.token.Val - if n.trimLeft { - res = strings.TrimLeft(res, tokenSpaceChars) - } - if n.trimRight { - res = strings.TrimRight(res, tokenSpaceChars) - } - writer.WriteString(res) - return nil -} diff --git a/vendor/github.com/flosch/pongo2/nodes_wrapper.go b/vendor/github.com/flosch/pongo2/nodes_wrapper.go deleted file mode 100644 index d1bcb8d8..00000000 --- a/vendor/github.com/flosch/pongo2/nodes_wrapper.go +++ /dev/null @@ -1,16 +0,0 @@ -package pongo2 - -type NodeWrapper struct { - Endtag string - nodes []INode -} - -func (wrapper *NodeWrapper) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - for _, n := range wrapper.nodes { - err := n.Execute(ctx, writer) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/flosch/pongo2/options.go b/vendor/github.com/flosch/pongo2/options.go deleted file mode 100644 index 9c39e467..00000000 --- a/vendor/github.com/flosch/pongo2/options.go +++ /dev/null @@ -1,26 +0,0 @@ -package pongo2 - -// Options allow you to change the behavior of template-engine. -// You can change the options before calling the Execute method. -type Options struct { - // If this is set to true the first newline after a block is removed (block, not variable tag!). Defaults to false. - TrimBlocks bool - - // If this is set to true leading spaces and tabs are stripped from the start of a line to a block. 
Defaults to false - LStripBlocks bool -} - -func newOptions() *Options { - return &Options{ - TrimBlocks: false, - LStripBlocks: false, - } -} - -// Update updates this options from another options. -func (opt *Options) Update(other *Options) *Options { - opt.TrimBlocks = other.TrimBlocks - opt.LStripBlocks = other.LStripBlocks - - return opt -} diff --git a/vendor/github.com/flosch/pongo2/parser.go b/vendor/github.com/flosch/pongo2/parser.go deleted file mode 100644 index 19553f17..00000000 --- a/vendor/github.com/flosch/pongo2/parser.go +++ /dev/null @@ -1,309 +0,0 @@ -package pongo2 - -import ( - "fmt" - "strings" - - "errors" -) - -type INode interface { - Execute(*ExecutionContext, TemplateWriter) *Error -} - -type IEvaluator interface { - INode - GetPositionToken() *Token - Evaluate(*ExecutionContext) (*Value, *Error) - FilterApplied(name string) bool -} - -// The parser provides you a comprehensive and easy tool to -// work with the template document and arguments provided by -// the user for your custom tag. -// -// The parser works on a token list which will be provided by pongo2. -// A token is a unit you can work with. Tokens are either of type identifier, -// string, number, keyword, HTML or symbol. -// -// (See Token's documentation for more about tokens) -type Parser struct { - name string - idx int - tokens []*Token - lastToken *Token - - // if the parser parses a template document, here will be - // a reference to it (needed to access the template through Tags) - template *Template -} - -// Creates a new parser to parse tokens. -// Used inside pongo2 to parse documents and to provide an easy-to-use -// parser for tag authors -func newParser(name string, tokens []*Token, template *Template) *Parser { - p := &Parser{ - name: name, - tokens: tokens, - template: template, - } - if len(tokens) > 0 { - p.lastToken = tokens[len(tokens)-1] - } - return p -} - -// Consume one token. It will be gone forever. 
-func (p *Parser) Consume() { - p.ConsumeN(1) -} - -// Consume N tokens. They will be gone forever. -func (p *Parser) ConsumeN(count int) { - p.idx += count -} - -// Returns the current token. -func (p *Parser) Current() *Token { - return p.Get(p.idx) -} - -// Returns the CURRENT token if the given type matches. -// Consumes this token on success. -func (p *Parser) MatchType(typ TokenType) *Token { - if t := p.PeekType(typ); t != nil { - p.Consume() - return t - } - return nil -} - -// Returns the CURRENT token if the given type AND value matches. -// Consumes this token on success. -func (p *Parser) Match(typ TokenType, val string) *Token { - if t := p.Peek(typ, val); t != nil { - p.Consume() - return t - } - return nil -} - -// Returns the CURRENT token if the given type AND *one* of -// the given values matches. -// Consumes this token on success. -func (p *Parser) MatchOne(typ TokenType, vals ...string) *Token { - for _, val := range vals { - if t := p.Peek(typ, val); t != nil { - p.Consume() - return t - } - } - return nil -} - -// Returns the CURRENT token if the given type matches. -// It DOES NOT consume the token. -func (p *Parser) PeekType(typ TokenType) *Token { - return p.PeekTypeN(0, typ) -} - -// Returns the CURRENT token if the given type AND value matches. -// It DOES NOT consume the token. -func (p *Parser) Peek(typ TokenType, val string) *Token { - return p.PeekN(0, typ, val) -} - -// Returns the CURRENT token if the given type AND *one* of -// the given values matches. -// It DOES NOT consume the token. -func (p *Parser) PeekOne(typ TokenType, vals ...string) *Token { - for _, v := range vals { - t := p.PeekN(0, typ, v) - if t != nil { - return t - } - } - return nil -} - -// Returns the tokens[current position + shift] token if the -// given type AND value matches for that token. -// DOES NOT consume the token. 
-func (p *Parser) PeekN(shift int, typ TokenType, val string) *Token { - t := p.Get(p.idx + shift) - if t != nil { - if t.Typ == typ && t.Val == val { - return t - } - } - return nil -} - -// Returns the tokens[current position + shift] token if the given type matches. -// DOES NOT consume the token for that token. -func (p *Parser) PeekTypeN(shift int, typ TokenType) *Token { - t := p.Get(p.idx + shift) - if t != nil { - if t.Typ == typ { - return t - } - } - return nil -} - -// Returns the UNCONSUMED token count. -func (p *Parser) Remaining() int { - return len(p.tokens) - p.idx -} - -// Returns the total token count. -func (p *Parser) Count() int { - return len(p.tokens) -} - -// Returns tokens[i] or NIL (if i >= len(tokens)) -func (p *Parser) Get(i int) *Token { - if i < len(p.tokens) && i >= 0 { - return p.tokens[i] - } - return nil -} - -// Returns tokens[current-position + shift] or NIL -// (if (current-position + i) >= len(tokens)) -func (p *Parser) GetR(shift int) *Token { - i := p.idx + shift - return p.Get(i) -} - -// Error produces a nice error message and returns an error-object. -// The 'token'-argument is optional. If provided, it will take -// the token's position information. If not provided, it will -// automatically use the CURRENT token's position information. -func (p *Parser) Error(msg string, token *Token) *Error { - if token == nil { - // Set current token - token = p.Current() - if token == nil { - // Set to last token - if len(p.tokens) > 0 { - token = p.tokens[len(p.tokens)-1] - } - } - } - var line, col int - if token != nil { - line = token.Line - col = token.Col - } - return &Error{ - Template: p.template, - Filename: p.name, - Sender: "parser", - Line: line, - Column: col, - Token: token, - OrigError: errors.New(msg), - } -} - -// Wraps all nodes between starting tag and "{% endtag %}" and provides -// one simple interface to execute the wrapped nodes. -// It returns a parser to process provided arguments to the tag. 
-func (p *Parser) WrapUntilTag(names ...string) (*NodeWrapper, *Parser, *Error) { - wrapper := &NodeWrapper{} - - var tagArgs []*Token - - for p.Remaining() > 0 { - // New tag, check whether we have to stop wrapping here - if p.Peek(TokenSymbol, "{%") != nil { - tagIdent := p.PeekTypeN(1, TokenIdentifier) - - if tagIdent != nil { - // We've found a (!) end-tag - - found := false - for _, n := range names { - if tagIdent.Val == n { - found = true - break - } - } - - // We only process the tag if we've found an end tag - if found { - // Okay, endtag found. - p.ConsumeN(2) // '{%' tagname - - for { - if p.Match(TokenSymbol, "%}") != nil { - // Okay, end the wrapping here - wrapper.Endtag = tagIdent.Val - return wrapper, newParser(p.template.name, tagArgs, p.template), nil - } - t := p.Current() - p.Consume() - if t == nil { - return nil, nil, p.Error("Unexpected EOF.", p.lastToken) - } - tagArgs = append(tagArgs, t) - } - } - } - - } - - // Otherwise process next element to be wrapped - node, err := p.parseDocElement() - if err != nil { - return nil, nil, err - } - wrapper.nodes = append(wrapper.nodes, node) - } - - return nil, nil, p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")), - p.lastToken) -} - -// Skips all nodes between starting tag and "{% endtag %}" -func (p *Parser) SkipUntilTag(names ...string) *Error { - for p.Remaining() > 0 { - // New tag, check whether we have to stop wrapping here - if p.Peek(TokenSymbol, "{%") != nil { - tagIdent := p.PeekTypeN(1, TokenIdentifier) - - if tagIdent != nil { - // We've found a (!) end-tag - - found := false - for _, n := range names { - if tagIdent.Val == n { - found = true - break - } - } - - // We only process the tag if we've found an end tag - if found { - // Okay, endtag found. - p.ConsumeN(2) // '{%' tagname - - for { - if p.Match(TokenSymbol, "%}") != nil { - // Done skipping, exit. 
- return nil - } - } - } - } - } - t := p.Current() - p.Consume() - if t == nil { - return p.Error("Unexpected EOF.", p.lastToken) - } - } - - return p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")), p.lastToken) -} diff --git a/vendor/github.com/flosch/pongo2/parser_document.go b/vendor/github.com/flosch/pongo2/parser_document.go deleted file mode 100644 index e3ac2c8e..00000000 --- a/vendor/github.com/flosch/pongo2/parser_document.go +++ /dev/null @@ -1,59 +0,0 @@ -package pongo2 - -// Doc = { ( Filter | Tag | HTML ) } -func (p *Parser) parseDocElement() (INode, *Error) { - t := p.Current() - - switch t.Typ { - case TokenHTML: - n := &nodeHTML{token: t} - left := p.PeekTypeN(-1, TokenSymbol) - right := p.PeekTypeN(1, TokenSymbol) - n.trimLeft = left != nil && left.TrimWhitespaces - n.trimRight = right != nil && right.TrimWhitespaces - p.Consume() // consume HTML element - return n, nil - case TokenSymbol: - switch t.Val { - case "{{": - // parse variable - variable, err := p.parseVariableElement() - if err != nil { - return nil, err - } - return variable, nil - case "{%": - // parse tag - tag, err := p.parseTagElement() - if err != nil { - return nil, err - } - return tag, nil - } - } - return nil, p.Error("Unexpected token (only HTML/tags/filters in templates allowed)", t) -} - -func (tpl *Template) parse() *Error { - tpl.parser = newParser(tpl.name, tpl.tokens, tpl) - doc, err := tpl.parser.parseDocument() - if err != nil { - return err - } - tpl.root = doc - return nil -} - -func (p *Parser) parseDocument() (*nodeDocument, *Error) { - doc := &nodeDocument{} - - for p.Remaining() > 0 { - node, err := p.parseDocElement() - if err != nil { - return nil, err - } - doc.Nodes = append(doc.Nodes, node) - } - - return doc, nil -} diff --git a/vendor/github.com/flosch/pongo2/parser_expression.go b/vendor/github.com/flosch/pongo2/parser_expression.go deleted file mode 100644 index 215b0afb..00000000 --- 
a/vendor/github.com/flosch/pongo2/parser_expression.go +++ /dev/null @@ -1,517 +0,0 @@ -package pongo2 - -import ( - "fmt" - "math" -) - -type Expression struct { - // TODO: Add location token? - expr1 IEvaluator - expr2 IEvaluator - opToken *Token -} - -type relationalExpression struct { - // TODO: Add location token? - expr1 IEvaluator - expr2 IEvaluator - opToken *Token -} - -type simpleExpression struct { - negate bool - negativeSign bool - term1 IEvaluator - term2 IEvaluator - opToken *Token -} - -type term struct { - // TODO: Add location token? - factor1 IEvaluator - factor2 IEvaluator - opToken *Token -} - -type power struct { - // TODO: Add location token? - power1 IEvaluator - power2 IEvaluator -} - -func (expr *Expression) FilterApplied(name string) bool { - return expr.expr1.FilterApplied(name) && (expr.expr2 == nil || - (expr.expr2 != nil && expr.expr2.FilterApplied(name))) -} - -func (expr *relationalExpression) FilterApplied(name string) bool { - return expr.expr1.FilterApplied(name) && (expr.expr2 == nil || - (expr.expr2 != nil && expr.expr2.FilterApplied(name))) -} - -func (expr *simpleExpression) FilterApplied(name string) bool { - return expr.term1.FilterApplied(name) && (expr.term2 == nil || - (expr.term2 != nil && expr.term2.FilterApplied(name))) -} - -func (expr *term) FilterApplied(name string) bool { - return expr.factor1.FilterApplied(name) && (expr.factor2 == nil || - (expr.factor2 != nil && expr.factor2.FilterApplied(name))) -} - -func (expr *power) FilterApplied(name string) bool { - return expr.power1.FilterApplied(name) && (expr.power2 == nil || - (expr.power2 != nil && expr.power2.FilterApplied(name))) -} - -func (expr *Expression) GetPositionToken() *Token { - return expr.expr1.GetPositionToken() -} - -func (expr *relationalExpression) GetPositionToken() *Token { - return expr.expr1.GetPositionToken() -} - -func (expr *simpleExpression) GetPositionToken() *Token { - return expr.term1.GetPositionToken() -} - -func (expr *term) 
GetPositionToken() *Token { - return expr.factor1.GetPositionToken() -} - -func (expr *power) GetPositionToken() *Token { - return expr.power1.GetPositionToken() -} - -func (expr *Expression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - value, err := expr.Evaluate(ctx) - if err != nil { - return err - } - writer.WriteString(value.String()) - return nil -} - -func (expr *relationalExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - value, err := expr.Evaluate(ctx) - if err != nil { - return err - } - writer.WriteString(value.String()) - return nil -} - -func (expr *simpleExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - value, err := expr.Evaluate(ctx) - if err != nil { - return err - } - writer.WriteString(value.String()) - return nil -} - -func (expr *term) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - value, err := expr.Evaluate(ctx) - if err != nil { - return err - } - writer.WriteString(value.String()) - return nil -} - -func (expr *power) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - value, err := expr.Evaluate(ctx) - if err != nil { - return err - } - writer.WriteString(value.String()) - return nil -} - -func (expr *Expression) Evaluate(ctx *ExecutionContext) (*Value, *Error) { - v1, err := expr.expr1.Evaluate(ctx) - if err != nil { - return nil, err - } - if expr.expr2 != nil { - switch expr.opToken.Val { - case "and", "&&": - if !v1.IsTrue() { - return AsValue(false), nil - } else { - v2, err := expr.expr2.Evaluate(ctx) - if err != nil { - return nil, err - } - return AsValue(v2.IsTrue()), nil - } - case "or", "||": - if v1.IsTrue() { - return AsValue(true), nil - } else { - v2, err := expr.expr2.Evaluate(ctx) - if err != nil { - return nil, err - } - return AsValue(v2.IsTrue()), nil - } - default: - return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken) - } - } else { - return v1, nil - } -} - -func (expr 
*relationalExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) { - v1, err := expr.expr1.Evaluate(ctx) - if err != nil { - return nil, err - } - if expr.expr2 != nil { - v2, err := expr.expr2.Evaluate(ctx) - if err != nil { - return nil, err - } - switch expr.opToken.Val { - case "<=": - if v1.IsFloat() || v2.IsFloat() { - return AsValue(v1.Float() <= v2.Float()), nil - } - if v1.IsTime() && v2.IsTime() { - tm1, tm2 := v1.Time(), v2.Time() - return AsValue(tm1.Before(tm2) || tm1.Equal(tm2)), nil - } - return AsValue(v1.Integer() <= v2.Integer()), nil - case ">=": - if v1.IsFloat() || v2.IsFloat() { - return AsValue(v1.Float() >= v2.Float()), nil - } - if v1.IsTime() && v2.IsTime() { - tm1, tm2 := v1.Time(), v2.Time() - return AsValue(tm1.After(tm2) || tm1.Equal(tm2)), nil - } - return AsValue(v1.Integer() >= v2.Integer()), nil - case "==": - return AsValue(v1.EqualValueTo(v2)), nil - case ">": - if v1.IsFloat() || v2.IsFloat() { - return AsValue(v1.Float() > v2.Float()), nil - } - if v1.IsTime() && v2.IsTime() { - return AsValue(v1.Time().After(v2.Time())), nil - } - return AsValue(v1.Integer() > v2.Integer()), nil - case "<": - if v1.IsFloat() || v2.IsFloat() { - return AsValue(v1.Float() < v2.Float()), nil - } - if v1.IsTime() && v2.IsTime() { - return AsValue(v1.Time().Before(v2.Time())), nil - } - return AsValue(v1.Integer() < v2.Integer()), nil - case "!=", "<>": - return AsValue(!v1.EqualValueTo(v2)), nil - case "in": - return AsValue(v2.Contains(v1)), nil - default: - return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken) - } - } else { - return v1, nil - } -} - -func (expr *simpleExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) { - t1, err := expr.term1.Evaluate(ctx) - if err != nil { - return nil, err - } - result := t1 - - if expr.negate { - result = result.Negate() - } - - if expr.negativeSign { - if result.IsNumber() { - switch { - case result.IsFloat(): - result = AsValue(-1 * result.Float()) - case 
result.IsInteger(): - result = AsValue(-1 * result.Integer()) - default: - return nil, ctx.Error("Operation between a number and a non-(float/integer) is not possible", nil) - } - } else { - return nil, ctx.Error("Negative sign on a non-number expression", expr.GetPositionToken()) - } - } - - if expr.term2 != nil { - t2, err := expr.term2.Evaluate(ctx) - if err != nil { - return nil, err - } - switch expr.opToken.Val { - case "+": - if result.IsFloat() || t2.IsFloat() { - // Result will be a float - return AsValue(result.Float() + t2.Float()), nil - } - // Result will be an integer - return AsValue(result.Integer() + t2.Integer()), nil - case "-": - if result.IsFloat() || t2.IsFloat() { - // Result will be a float - return AsValue(result.Float() - t2.Float()), nil - } - // Result will be an integer - return AsValue(result.Integer() - t2.Integer()), nil - default: - return nil, ctx.Error("Unimplemented", expr.GetPositionToken()) - } - } - - return result, nil -} - -func (expr *term) Evaluate(ctx *ExecutionContext) (*Value, *Error) { - f1, err := expr.factor1.Evaluate(ctx) - if err != nil { - return nil, err - } - if expr.factor2 != nil { - f2, err := expr.factor2.Evaluate(ctx) - if err != nil { - return nil, err - } - switch expr.opToken.Val { - case "*": - if f1.IsFloat() || f2.IsFloat() { - // Result will be float - return AsValue(f1.Float() * f2.Float()), nil - } - // Result will be int - return AsValue(f1.Integer() * f2.Integer()), nil - case "/": - if f1.IsFloat() || f2.IsFloat() { - // Result will be float - return AsValue(f1.Float() / f2.Float()), nil - } - // Result will be int - return AsValue(f1.Integer() / f2.Integer()), nil - case "%": - // Result will be int - return AsValue(f1.Integer() % f2.Integer()), nil - default: - return nil, ctx.Error("unimplemented", expr.opToken) - } - } else { - return f1, nil - } -} - -func (expr *power) Evaluate(ctx *ExecutionContext) (*Value, *Error) { - p1, err := expr.power1.Evaluate(ctx) - if err != nil { - return nil, 
err - } - if expr.power2 != nil { - p2, err := expr.power2.Evaluate(ctx) - if err != nil { - return nil, err - } - return AsValue(math.Pow(p1.Float(), p2.Float())), nil - } - return p1, nil -} - -func (p *Parser) parseFactor() (IEvaluator, *Error) { - if p.Match(TokenSymbol, "(") != nil { - expr, err := p.ParseExpression() - if err != nil { - return nil, err - } - if p.Match(TokenSymbol, ")") == nil { - return nil, p.Error("Closing bracket expected after expression", nil) - } - return expr, nil - } - - return p.parseVariableOrLiteralWithFilter() -} - -func (p *Parser) parsePower() (IEvaluator, *Error) { - pw := new(power) - - power1, err := p.parseFactor() - if err != nil { - return nil, err - } - pw.power1 = power1 - - if p.Match(TokenSymbol, "^") != nil { - power2, err := p.parsePower() - if err != nil { - return nil, err - } - pw.power2 = power2 - } - - if pw.power2 == nil { - // Shortcut for faster evaluation - return pw.power1, nil - } - - return pw, nil -} - -func (p *Parser) parseTerm() (IEvaluator, *Error) { - returnTerm := new(term) - - factor1, err := p.parsePower() - if err != nil { - return nil, err - } - returnTerm.factor1 = factor1 - - for p.PeekOne(TokenSymbol, "*", "/", "%") != nil { - if returnTerm.opToken != nil { - // Create new sub-term - returnTerm = &term{ - factor1: returnTerm, - } - } - - op := p.Current() - p.Consume() - - factor2, err := p.parsePower() - if err != nil { - return nil, err - } - - returnTerm.opToken = op - returnTerm.factor2 = factor2 - } - - if returnTerm.opToken == nil { - // Shortcut for faster evaluation - return returnTerm.factor1, nil - } - - return returnTerm, nil -} - -func (p *Parser) parseSimpleExpression() (IEvaluator, *Error) { - expr := new(simpleExpression) - - if sign := p.MatchOne(TokenSymbol, "+", "-"); sign != nil { - if sign.Val == "-" { - expr.negativeSign = true - } - } - - if p.Match(TokenSymbol, "!") != nil || p.Match(TokenKeyword, "not") != nil { - expr.negate = true - } - - term1, err := 
p.parseTerm() - if err != nil { - return nil, err - } - expr.term1 = term1 - - for p.PeekOne(TokenSymbol, "+", "-") != nil { - if expr.opToken != nil { - // New sub expr - expr = &simpleExpression{ - term1: expr, - } - } - - op := p.Current() - p.Consume() - - term2, err := p.parseTerm() - if err != nil { - return nil, err - } - - expr.term2 = term2 - expr.opToken = op - } - - if expr.negate == false && expr.negativeSign == false && expr.term2 == nil { - // Shortcut for faster evaluation - return expr.term1, nil - } - - return expr, nil -} - -func (p *Parser) parseRelationalExpression() (IEvaluator, *Error) { - expr1, err := p.parseSimpleExpression() - if err != nil { - return nil, err - } - - expr := &relationalExpression{ - expr1: expr1, - } - - if t := p.MatchOne(TokenSymbol, "==", "<=", ">=", "!=", "<>", ">", "<"); t != nil { - expr2, err := p.parseRelationalExpression() - if err != nil { - return nil, err - } - expr.opToken = t - expr.expr2 = expr2 - } else if t := p.MatchOne(TokenKeyword, "in"); t != nil { - expr2, err := p.parseSimpleExpression() - if err != nil { - return nil, err - } - expr.opToken = t - expr.expr2 = expr2 - } - - if expr.expr2 == nil { - // Shortcut for faster evaluation - return expr.expr1, nil - } - - return expr, nil -} - -func (p *Parser) ParseExpression() (IEvaluator, *Error) { - rexpr1, err := p.parseRelationalExpression() - if err != nil { - return nil, err - } - - exp := &Expression{ - expr1: rexpr1, - } - - if p.PeekOne(TokenSymbol, "&&", "||") != nil || p.PeekOne(TokenKeyword, "and", "or") != nil { - op := p.Current() - p.Consume() - expr2, err := p.ParseExpression() - if err != nil { - return nil, err - } - exp.expr2 = expr2 - exp.opToken = op - } - - if exp.expr2 == nil { - // Shortcut for faster evaluation - return exp.expr1, nil - } - - return exp, nil -} diff --git a/vendor/github.com/flosch/pongo2/pongo2.go b/vendor/github.com/flosch/pongo2/pongo2.go deleted file mode 100644 index eda3aa07..00000000 --- 
a/vendor/github.com/flosch/pongo2/pongo2.go +++ /dev/null @@ -1,14 +0,0 @@ -package pongo2 - -// Version string -const Version = "dev" - -// Must panics, if a Template couldn't successfully parsed. This is how you -// would use it: -// var baseTemplate = pongo2.Must(pongo2.FromFile("templates/base.html")) -func Must(tpl *Template, err error) *Template { - if err != nil { - panic(err) - } - return tpl -} diff --git a/vendor/github.com/flosch/pongo2/tags.go b/vendor/github.com/flosch/pongo2/tags.go deleted file mode 100644 index 710ee252..00000000 --- a/vendor/github.com/flosch/pongo2/tags.go +++ /dev/null @@ -1,133 +0,0 @@ -package pongo2 - -/* Incomplete: - ----------- - - verbatim (only the "name" argument is missing for verbatim) - - Reconsideration: - ---------------- - - debug (reason: not sure what to output yet) - regroup / Grouping on other properties (reason: maybe too python-specific; not sure how useful this would be in Go) - - Following built-in tags wont be added: - -------------------------------------- - - csrf_token (reason: web-framework specific) - load (reason: python-specific) - url (reason: web-framework specific) -*/ - -import ( - "fmt" -) - -type INodeTag interface { - INode -} - -// This is the function signature of the tag's parser you will have -// to implement in order to create a new tag. -// -// 'doc' is providing access to the whole document while 'arguments' -// is providing access to the user's arguments to the tag: -// -// {% your_tag_name some "arguments" 123 %} -// -// start_token will be the *Token with the tag's name in it (here: your_tag_name). -// -// Please see the Parser documentation on how to use the parser. -// See RegisterTag()'s documentation for more information about -// writing a tag as well. 
-type TagParser func(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) - -type tag struct { - name string - parser TagParser -} - -var tags map[string]*tag - -func init() { - tags = make(map[string]*tag) -} - -// Registers a new tag. You usually want to call this -// function in the tag's init() function: -// http://golang.org/doc/effective_go.html#init -// -// See http://www.florian-schlachter.de/post/pongo2/ for more about -// writing filters and tags. -func RegisterTag(name string, parserFn TagParser) error { - _, existing := tags[name] - if existing { - return fmt.Errorf("tag with name '%s' is already registered", name) - } - tags[name] = &tag{ - name: name, - parser: parserFn, - } - return nil -} - -// Replaces an already registered tag with a new implementation. Use this -// function with caution since it allows you to change existing tag behaviour. -func ReplaceTag(name string, parserFn TagParser) error { - _, existing := tags[name] - if !existing { - return fmt.Errorf("tag with name '%s' does not exist (therefore cannot be overridden)", name) - } - tags[name] = &tag{ - name: name, - parser: parserFn, - } - return nil -} - -// Tag = "{%" IDENT ARGS "%}" -func (p *Parser) parseTagElement() (INodeTag, *Error) { - p.Consume() // consume "{%" - tokenName := p.MatchType(TokenIdentifier) - - // Check for identifier - if tokenName == nil { - return nil, p.Error("Tag name must be an identifier.", nil) - } - - // Check for the existing tag - tag, exists := tags[tokenName.Val] - if !exists { - // Does not exists - return nil, p.Error(fmt.Sprintf("Tag '%s' not found (or beginning tag not provided)", tokenName.Val), tokenName) - } - - // Check sandbox tag restriction - if _, isBanned := p.template.set.bannedTags[tokenName.Val]; isBanned { - return nil, p.Error(fmt.Sprintf("Usage of tag '%s' is not allowed (sandbox restriction active).", tokenName.Val), tokenName) - } - - var argsToken []*Token - for p.Peek(TokenSymbol, "%}") == nil && p.Remaining() > 0 { 
- // Add token to args - argsToken = append(argsToken, p.Current()) - p.Consume() // next token - } - - // EOF? - if p.Remaining() == 0 { - return nil, p.Error("Unexpectedly reached EOF, no tag end found.", p.lastToken) - } - - p.Match(TokenSymbol, "%}") - - argParser := newParser(p.name, argsToken, p.template) - if len(argsToken) == 0 { - // This is done to have nice EOF error messages - argParser.lastToken = tokenName - } - - p.template.level++ - defer func() { p.template.level-- }() - return tag.parser(p, tokenName, argParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_autoescape.go b/vendor/github.com/flosch/pongo2/tags_autoescape.go deleted file mode 100644 index 590a1db3..00000000 --- a/vendor/github.com/flosch/pongo2/tags_autoescape.go +++ /dev/null @@ -1,52 +0,0 @@ -package pongo2 - -type tagAutoescapeNode struct { - wrapper *NodeWrapper - autoescape bool -} - -func (node *tagAutoescapeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - old := ctx.Autoescape - ctx.Autoescape = node.autoescape - - err := node.wrapper.Execute(ctx, writer) - if err != nil { - return err - } - - ctx.Autoescape = old - - return nil -} - -func tagAutoescapeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - autoescapeNode := &tagAutoescapeNode{} - - wrapper, _, err := doc.WrapUntilTag("endautoescape") - if err != nil { - return nil, err - } - autoescapeNode.wrapper = wrapper - - modeToken := arguments.MatchType(TokenIdentifier) - if modeToken == nil { - return nil, arguments.Error("A mode is required for autoescape-tag.", nil) - } - if modeToken.Val == "on" { - autoescapeNode.autoescape = true - } else if modeToken.Val == "off" { - autoescapeNode.autoescape = false - } else { - return nil, arguments.Error("Only 'on' or 'off' is valid as an autoescape-mode.", nil) - } - - if arguments.Remaining() > 0 { - return nil, arguments.Error("Malformed autoescape-tag arguments.", nil) - } - - return autoescapeNode, nil -} - -func init() { 
- RegisterTag("autoescape", tagAutoescapeParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_block.go b/vendor/github.com/flosch/pongo2/tags_block.go deleted file mode 100644 index 86145f32..00000000 --- a/vendor/github.com/flosch/pongo2/tags_block.go +++ /dev/null @@ -1,129 +0,0 @@ -package pongo2 - -import ( - "bytes" - "fmt" -) - -type tagBlockNode struct { - name string -} - -func (node *tagBlockNode) getBlockWrappers(tpl *Template) []*NodeWrapper { - nodeWrappers := make([]*NodeWrapper, 0) - var t *NodeWrapper - - for tpl != nil { - t = tpl.blocks[node.name] - if t != nil { - nodeWrappers = append(nodeWrappers, t) - } - tpl = tpl.child - } - - return nodeWrappers -} - -func (node *tagBlockNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - tpl := ctx.template - if tpl == nil { - panic("internal error: tpl == nil") - } - - // Determine the block to execute - blockWrappers := node.getBlockWrappers(tpl) - lenBlockWrappers := len(blockWrappers) - - if lenBlockWrappers == 0 { - return ctx.Error("internal error: len(block_wrappers) == 0 in tagBlockNode.Execute()", nil) - } - - blockWrapper := blockWrappers[lenBlockWrappers-1] - ctx.Private["block"] = tagBlockInformation{ - ctx: ctx, - wrappers: blockWrappers[0 : lenBlockWrappers-1], - } - err := blockWrapper.Execute(ctx, writer) - if err != nil { - return err - } - - return nil -} - -type tagBlockInformation struct { - ctx *ExecutionContext - wrappers []*NodeWrapper -} - -func (t tagBlockInformation) Super() string { - lenWrappers := len(t.wrappers) - - if lenWrappers == 0 { - return "" - } - - superCtx := NewChildExecutionContext(t.ctx) - superCtx.Private["block"] = tagBlockInformation{ - ctx: t.ctx, - wrappers: t.wrappers[0 : lenWrappers-1], - } - - blockWrapper := t.wrappers[lenWrappers-1] - buf := bytes.NewBufferString("") - err := blockWrapper.Execute(superCtx, &templateWriter{buf}) - if err != nil { - return "" - } - return buf.String() -} - -func tagBlockParser(doc *Parser, start 
*Token, arguments *Parser) (INodeTag, *Error) { - if arguments.Count() == 0 { - return nil, arguments.Error("Tag 'block' requires an identifier.", nil) - } - - nameToken := arguments.MatchType(TokenIdentifier) - if nameToken == nil { - return nil, arguments.Error("First argument for tag 'block' must be an identifier.", nil) - } - - if arguments.Remaining() != 0 { - return nil, arguments.Error("Tag 'block' takes exactly 1 argument (an identifier).", nil) - } - - wrapper, endtagargs, err := doc.WrapUntilTag("endblock") - if err != nil { - return nil, err - } - if endtagargs.Remaining() > 0 { - endtagnameToken := endtagargs.MatchType(TokenIdentifier) - if endtagnameToken != nil { - if endtagnameToken.Val != nameToken.Val { - return nil, endtagargs.Error(fmt.Sprintf("Name for 'endblock' must equal to 'block'-tag's name ('%s' != '%s').", - nameToken.Val, endtagnameToken.Val), nil) - } - } - - if endtagnameToken == nil || endtagargs.Remaining() > 0 { - return nil, endtagargs.Error("Either no or only one argument (identifier) allowed for 'endblock'.", nil) - } - } - - tpl := doc.template - if tpl == nil { - panic("internal error: tpl == nil") - } - _, hasBlock := tpl.blocks[nameToken.Val] - if !hasBlock { - tpl.blocks[nameToken.Val] = wrapper - } else { - return nil, arguments.Error(fmt.Sprintf("Block named '%s' already defined", nameToken.Val), nil) - } - - return &tagBlockNode{name: nameToken.Val}, nil -} - -func init() { - RegisterTag("block", tagBlockParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_comment.go b/vendor/github.com/flosch/pongo2/tags_comment.go deleted file mode 100644 index 56a02ed9..00000000 --- a/vendor/github.com/flosch/pongo2/tags_comment.go +++ /dev/null @@ -1,27 +0,0 @@ -package pongo2 - -type tagCommentNode struct{} - -func (node *tagCommentNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - return nil -} - -func tagCommentParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - commentNode := 
&tagCommentNode{} - - // TODO: Process the endtag's arguments (see django 'comment'-tag documentation) - err := doc.SkipUntilTag("endcomment") - if err != nil { - return nil, err - } - - if arguments.Count() != 0 { - return nil, arguments.Error("Tag 'comment' does not take any argument.", nil) - } - - return commentNode, nil -} - -func init() { - RegisterTag("comment", tagCommentParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_cycle.go b/vendor/github.com/flosch/pongo2/tags_cycle.go deleted file mode 100644 index ffbd254e..00000000 --- a/vendor/github.com/flosch/pongo2/tags_cycle.go +++ /dev/null @@ -1,106 +0,0 @@ -package pongo2 - -type tagCycleValue struct { - node *tagCycleNode - value *Value -} - -type tagCycleNode struct { - position *Token - args []IEvaluator - idx int - asName string - silent bool -} - -func (cv *tagCycleValue) String() string { - return cv.value.String() -} - -func (node *tagCycleNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - item := node.args[node.idx%len(node.args)] - node.idx++ - - val, err := item.Evaluate(ctx) - if err != nil { - return err - } - - if t, ok := val.Interface().(*tagCycleValue); ok { - // {% cycle "test1" "test2" - // {% cycle cycleitem %} - - // Update the cycle value with next value - item := t.node.args[t.node.idx%len(t.node.args)] - t.node.idx++ - - val, err := item.Evaluate(ctx) - if err != nil { - return err - } - - t.value = val - - if !t.node.silent { - writer.WriteString(val.String()) - } - } else { - // Regular call - - cycleValue := &tagCycleValue{ - node: node, - value: val, - } - - if node.asName != "" { - ctx.Private[node.asName] = cycleValue - } - if !node.silent { - writer.WriteString(val.String()) - } - } - - return nil -} - -// HINT: We're not supporting the old comma-separated list of expressions argument-style -func tagCycleParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - cycleNode := &tagCycleNode{ - position: start, - } - - for 
arguments.Remaining() > 0 { - node, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - cycleNode.args = append(cycleNode.args, node) - - if arguments.MatchOne(TokenKeyword, "as") != nil { - // as - - nameToken := arguments.MatchType(TokenIdentifier) - if nameToken == nil { - return nil, arguments.Error("Name (identifier) expected after 'as'.", nil) - } - cycleNode.asName = nameToken.Val - - if arguments.MatchOne(TokenIdentifier, "silent") != nil { - cycleNode.silent = true - } - - // Now we're finished - break - } - } - - if arguments.Remaining() > 0 { - return nil, arguments.Error("Malformed cycle-tag.", nil) - } - - return cycleNode, nil -} - -func init() { - RegisterTag("cycle", tagCycleParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_extends.go b/vendor/github.com/flosch/pongo2/tags_extends.go deleted file mode 100644 index 5771020a..00000000 --- a/vendor/github.com/flosch/pongo2/tags_extends.go +++ /dev/null @@ -1,52 +0,0 @@ -package pongo2 - -type tagExtendsNode struct { - filename string -} - -func (node *tagExtendsNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - return nil -} - -func tagExtendsParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - extendsNode := &tagExtendsNode{} - - if doc.template.level > 1 { - return nil, arguments.Error("The 'extends' tag can only defined on root level.", start) - } - - if doc.template.parent != nil { - // Already one parent - return nil, arguments.Error("This template has already one parent.", start) - } - - if filenameToken := arguments.MatchType(TokenString); filenameToken != nil { - // prepared, static template - - // Get parent's filename - parentFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val) - - // Parse the parent - parentTemplate, err := doc.template.set.FromFile(parentFilename) - if err != nil { - return nil, err.(*Error) - } - - // Keep track of things - parentTemplate.child = doc.template - 
doc.template.parent = parentTemplate - extendsNode.filename = parentFilename - } else { - return nil, arguments.Error("Tag 'extends' requires a template filename as string.", nil) - } - - if arguments.Remaining() > 0 { - return nil, arguments.Error("Tag 'extends' does only take 1 argument.", nil) - } - - return extendsNode, nil -} - -func init() { - RegisterTag("extends", tagExtendsParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_filter.go b/vendor/github.com/flosch/pongo2/tags_filter.go deleted file mode 100644 index b38fd929..00000000 --- a/vendor/github.com/flosch/pongo2/tags_filter.go +++ /dev/null @@ -1,95 +0,0 @@ -package pongo2 - -import ( - "bytes" -) - -type nodeFilterCall struct { - name string - paramExpr IEvaluator -} - -type tagFilterNode struct { - position *Token - bodyWrapper *NodeWrapper - filterChain []*nodeFilterCall -} - -func (node *tagFilterNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - temp := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB size - - err := node.bodyWrapper.Execute(ctx, temp) - if err != nil { - return err - } - - value := AsValue(temp.String()) - - for _, call := range node.filterChain { - var param *Value - if call.paramExpr != nil { - param, err = call.paramExpr.Evaluate(ctx) - if err != nil { - return err - } - } else { - param = AsValue(nil) - } - value, err = ApplyFilter(call.name, value, param) - if err != nil { - return ctx.Error(err.Error(), node.position) - } - } - - writer.WriteString(value.String()) - - return nil -} - -func tagFilterParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - filterNode := &tagFilterNode{ - position: start, - } - - wrapper, _, err := doc.WrapUntilTag("endfilter") - if err != nil { - return nil, err - } - filterNode.bodyWrapper = wrapper - - for arguments.Remaining() > 0 { - filterCall := &nodeFilterCall{} - - nameToken := arguments.MatchType(TokenIdentifier) - if nameToken == nil { - return nil, arguments.Error("Expected a filter 
name (identifier).", nil) - } - filterCall.name = nameToken.Val - - if arguments.MatchOne(TokenSymbol, ":") != nil { - // Filter parameter - // NOTICE: we can't use ParseExpression() here, because it would parse the next filter "|..." as well in the argument list - expr, err := arguments.parseVariableOrLiteral() - if err != nil { - return nil, err - } - filterCall.paramExpr = expr - } - - filterNode.filterChain = append(filterNode.filterChain, filterCall) - - if arguments.MatchOne(TokenSymbol, "|") == nil { - break - } - } - - if arguments.Remaining() > 0 { - return nil, arguments.Error("Malformed filter-tag arguments.", nil) - } - - return filterNode, nil -} - -func init() { - RegisterTag("filter", tagFilterParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_firstof.go b/vendor/github.com/flosch/pongo2/tags_firstof.go deleted file mode 100644 index 5b2888e2..00000000 --- a/vendor/github.com/flosch/pongo2/tags_firstof.go +++ /dev/null @@ -1,49 +0,0 @@ -package pongo2 - -type tagFirstofNode struct { - position *Token - args []IEvaluator -} - -func (node *tagFirstofNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - for _, arg := range node.args { - val, err := arg.Evaluate(ctx) - if err != nil { - return err - } - - if val.IsTrue() { - if ctx.Autoescape && !arg.FilterApplied("safe") { - val, err = ApplyFilter("escape", val, nil) - if err != nil { - return err - } - } - - writer.WriteString(val.String()) - return nil - } - } - - return nil -} - -func tagFirstofParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - firstofNode := &tagFirstofNode{ - position: start, - } - - for arguments.Remaining() > 0 { - node, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - firstofNode.args = append(firstofNode.args, node) - } - - return firstofNode, nil -} - -func init() { - RegisterTag("firstof", tagFirstofParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_for.go 
b/vendor/github.com/flosch/pongo2/tags_for.go deleted file mode 100644 index 5b0b5554..00000000 --- a/vendor/github.com/flosch/pongo2/tags_for.go +++ /dev/null @@ -1,159 +0,0 @@ -package pongo2 - -type tagForNode struct { - key string - value string // only for maps: for key, value in map - objectEvaluator IEvaluator - reversed bool - sorted bool - - bodyWrapper *NodeWrapper - emptyWrapper *NodeWrapper -} - -type tagForLoopInformation struct { - Counter int - Counter0 int - Revcounter int - Revcounter0 int - First bool - Last bool - Parentloop *tagForLoopInformation -} - -func (node *tagForNode) Execute(ctx *ExecutionContext, writer TemplateWriter) (forError *Error) { - // Backup forloop (as parentloop in public context), key-name and value-name - forCtx := NewChildExecutionContext(ctx) - parentloop := forCtx.Private["forloop"] - - // Create loop struct - loopInfo := &tagForLoopInformation{ - First: true, - } - - // Is it a loop in a loop? - if parentloop != nil { - loopInfo.Parentloop = parentloop.(*tagForLoopInformation) - } - - // Register loopInfo in public context - forCtx.Private["forloop"] = loopInfo - - obj, err := node.objectEvaluator.Evaluate(forCtx) - if err != nil { - return err - } - - obj.IterateOrder(func(idx, count int, key, value *Value) bool { - // There's something to iterate over (correct type and at least 1 item) - - // Update loop infos and public context - forCtx.Private[node.key] = key - if value != nil { - forCtx.Private[node.value] = value - } - loopInfo.Counter = idx + 1 - loopInfo.Counter0 = idx - if idx == 1 { - loopInfo.First = false - } - if idx+1 == count { - loopInfo.Last = true - } - loopInfo.Revcounter = count - idx // TODO: Not sure about this, have to look it up - loopInfo.Revcounter0 = count - (idx + 1) // TODO: Not sure about this, have to look it up - - // Render elements with updated context - err := node.bodyWrapper.Execute(forCtx, writer) - if err != nil { - forError = err - return false - } - return true - }, func() { - 
// Nothing to iterate over (maybe wrong type or no items) - if node.emptyWrapper != nil { - err := node.emptyWrapper.Execute(forCtx, writer) - if err != nil { - forError = err - } - } - }, node.reversed, node.sorted) - - return forError -} - -func tagForParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - forNode := &tagForNode{} - - // Arguments parsing - var valueToken *Token - keyToken := arguments.MatchType(TokenIdentifier) - if keyToken == nil { - return nil, arguments.Error("Expected an key identifier as first argument for 'for'-tag", nil) - } - - if arguments.Match(TokenSymbol, ",") != nil { - // Value name is provided - valueToken = arguments.MatchType(TokenIdentifier) - if valueToken == nil { - return nil, arguments.Error("Value name must be an identifier.", nil) - } - } - - if arguments.Match(TokenKeyword, "in") == nil { - return nil, arguments.Error("Expected keyword 'in'.", nil) - } - - objectEvaluator, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - forNode.objectEvaluator = objectEvaluator - forNode.key = keyToken.Val - if valueToken != nil { - forNode.value = valueToken.Val - } - - if arguments.MatchOne(TokenIdentifier, "reversed") != nil { - forNode.reversed = true - } - - if arguments.MatchOne(TokenIdentifier, "sorted") != nil { - forNode.sorted = true - } - - if arguments.Remaining() > 0 { - return nil, arguments.Error("Malformed for-loop arguments.", nil) - } - - // Body wrapping - wrapper, endargs, err := doc.WrapUntilTag("empty", "endfor") - if err != nil { - return nil, err - } - forNode.bodyWrapper = wrapper - - if endargs.Count() > 0 { - return nil, endargs.Error("Arguments not allowed here.", nil) - } - - if wrapper.Endtag == "empty" { - // if there's an else in the if-statement, we need the else-Block as well - wrapper, endargs, err = doc.WrapUntilTag("endfor") - if err != nil { - return nil, err - } - forNode.emptyWrapper = wrapper - - if endargs.Count() > 0 { - return nil, 
endargs.Error("Arguments not allowed here.", nil) - } - } - - return forNode, nil -} - -func init() { - RegisterTag("for", tagForParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_if.go b/vendor/github.com/flosch/pongo2/tags_if.go deleted file mode 100644 index 3eeaf3b4..00000000 --- a/vendor/github.com/flosch/pongo2/tags_if.go +++ /dev/null @@ -1,76 +0,0 @@ -package pongo2 - -type tagIfNode struct { - conditions []IEvaluator - wrappers []*NodeWrapper -} - -func (node *tagIfNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - for i, condition := range node.conditions { - result, err := condition.Evaluate(ctx) - if err != nil { - return err - } - - if result.IsTrue() { - return node.wrappers[i].Execute(ctx, writer) - } - // Last condition? - if len(node.conditions) == i+1 && len(node.wrappers) > i+1 { - return node.wrappers[i+1].Execute(ctx, writer) - } - } - return nil -} - -func tagIfParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - ifNode := &tagIfNode{} - - // Parse first and main IF condition - condition, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - ifNode.conditions = append(ifNode.conditions, condition) - - if arguments.Remaining() > 0 { - return nil, arguments.Error("If-condition is malformed.", nil) - } - - // Check the rest - for { - wrapper, tagArgs, err := doc.WrapUntilTag("elif", "else", "endif") - if err != nil { - return nil, err - } - ifNode.wrappers = append(ifNode.wrappers, wrapper) - - if wrapper.Endtag == "elif" { - // elif can take a condition - condition, err = tagArgs.ParseExpression() - if err != nil { - return nil, err - } - ifNode.conditions = append(ifNode.conditions, condition) - - if tagArgs.Remaining() > 0 { - return nil, tagArgs.Error("Elif-condition is malformed.", nil) - } - } else { - if tagArgs.Count() > 0 { - // else/endif can't take any conditions - return nil, tagArgs.Error("Arguments not allowed here.", nil) - } - } - - if wrapper.Endtag == 
"endif" { - break - } - } - - return ifNode, nil -} - -func init() { - RegisterTag("if", tagIfParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_ifchanged.go b/vendor/github.com/flosch/pongo2/tags_ifchanged.go deleted file mode 100644 index 45296a0a..00000000 --- a/vendor/github.com/flosch/pongo2/tags_ifchanged.go +++ /dev/null @@ -1,116 +0,0 @@ -package pongo2 - -import ( - "bytes" -) - -type tagIfchangedNode struct { - watchedExpr []IEvaluator - lastValues []*Value - lastContent []byte - thenWrapper *NodeWrapper - elseWrapper *NodeWrapper -} - -func (node *tagIfchangedNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - if len(node.watchedExpr) == 0 { - // Check against own rendered body - - buf := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB - err := node.thenWrapper.Execute(ctx, buf) - if err != nil { - return err - } - - bufBytes := buf.Bytes() - if !bytes.Equal(node.lastContent, bufBytes) { - // Rendered content changed, output it - writer.Write(bufBytes) - node.lastContent = bufBytes - } - } else { - nowValues := make([]*Value, 0, len(node.watchedExpr)) - for _, expr := range node.watchedExpr { - val, err := expr.Evaluate(ctx) - if err != nil { - return err - } - nowValues = append(nowValues, val) - } - - // Compare old to new values now - changed := len(node.lastValues) == 0 - - for idx, oldVal := range node.lastValues { - if !oldVal.EqualValueTo(nowValues[idx]) { - changed = true - break // we can stop here because ONE value changed - } - } - - node.lastValues = nowValues - - if changed { - // Render thenWrapper - err := node.thenWrapper.Execute(ctx, writer) - if err != nil { - return err - } - } else { - // Render elseWrapper - err := node.elseWrapper.Execute(ctx, writer) - if err != nil { - return err - } - } - } - - return nil -} - -func tagIfchangedParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - ifchangedNode := &tagIfchangedNode{} - - for arguments.Remaining() > 0 { - // Parse condition - 
expr, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - ifchangedNode.watchedExpr = append(ifchangedNode.watchedExpr, expr) - } - - if arguments.Remaining() > 0 { - return nil, arguments.Error("Ifchanged-arguments are malformed.", nil) - } - - // Wrap then/else-blocks - wrapper, endargs, err := doc.WrapUntilTag("else", "endifchanged") - if err != nil { - return nil, err - } - ifchangedNode.thenWrapper = wrapper - - if endargs.Count() > 0 { - return nil, endargs.Error("Arguments not allowed here.", nil) - } - - if wrapper.Endtag == "else" { - // if there's an else in the if-statement, we need the else-Block as well - wrapper, endargs, err = doc.WrapUntilTag("endifchanged") - if err != nil { - return nil, err - } - ifchangedNode.elseWrapper = wrapper - - if endargs.Count() > 0 { - return nil, endargs.Error("Arguments not allowed here.", nil) - } - } - - return ifchangedNode, nil -} - -func init() { - RegisterTag("ifchanged", tagIfchangedParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_ifequal.go b/vendor/github.com/flosch/pongo2/tags_ifequal.go deleted file mode 100644 index 103f1c7b..00000000 --- a/vendor/github.com/flosch/pongo2/tags_ifequal.go +++ /dev/null @@ -1,78 +0,0 @@ -package pongo2 - -type tagIfEqualNode struct { - var1, var2 IEvaluator - thenWrapper *NodeWrapper - elseWrapper *NodeWrapper -} - -func (node *tagIfEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - r1, err := node.var1.Evaluate(ctx) - if err != nil { - return err - } - r2, err := node.var2.Evaluate(ctx) - if err != nil { - return err - } - - result := r1.EqualValueTo(r2) - - if result { - return node.thenWrapper.Execute(ctx, writer) - } - if node.elseWrapper != nil { - return node.elseWrapper.Execute(ctx, writer) - } - return nil -} - -func tagIfEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - ifequalNode := &tagIfEqualNode{} - - // Parse two expressions - var1, err := arguments.ParseExpression() - 
if err != nil { - return nil, err - } - var2, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - ifequalNode.var1 = var1 - ifequalNode.var2 = var2 - - if arguments.Remaining() > 0 { - return nil, arguments.Error("ifequal only takes 2 arguments.", nil) - } - - // Wrap then/else-blocks - wrapper, endargs, err := doc.WrapUntilTag("else", "endifequal") - if err != nil { - return nil, err - } - ifequalNode.thenWrapper = wrapper - - if endargs.Count() > 0 { - return nil, endargs.Error("Arguments not allowed here.", nil) - } - - if wrapper.Endtag == "else" { - // if there's an else in the if-statement, we need the else-Block as well - wrapper, endargs, err = doc.WrapUntilTag("endifequal") - if err != nil { - return nil, err - } - ifequalNode.elseWrapper = wrapper - - if endargs.Count() > 0 { - return nil, endargs.Error("Arguments not allowed here.", nil) - } - } - - return ifequalNode, nil -} - -func init() { - RegisterTag("ifequal", tagIfEqualParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_ifnotequal.go b/vendor/github.com/flosch/pongo2/tags_ifnotequal.go deleted file mode 100644 index 0d287d34..00000000 --- a/vendor/github.com/flosch/pongo2/tags_ifnotequal.go +++ /dev/null @@ -1,78 +0,0 @@ -package pongo2 - -type tagIfNotEqualNode struct { - var1, var2 IEvaluator - thenWrapper *NodeWrapper - elseWrapper *NodeWrapper -} - -func (node *tagIfNotEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - r1, err := node.var1.Evaluate(ctx) - if err != nil { - return err - } - r2, err := node.var2.Evaluate(ctx) - if err != nil { - return err - } - - result := !r1.EqualValueTo(r2) - - if result { - return node.thenWrapper.Execute(ctx, writer) - } - if node.elseWrapper != nil { - return node.elseWrapper.Execute(ctx, writer) - } - return nil -} - -func tagIfNotEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - ifnotequalNode := &tagIfNotEqualNode{} - - // Parse two expressions - var1, err := 
arguments.ParseExpression() - if err != nil { - return nil, err - } - var2, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - ifnotequalNode.var1 = var1 - ifnotequalNode.var2 = var2 - - if arguments.Remaining() > 0 { - return nil, arguments.Error("ifequal only takes 2 arguments.", nil) - } - - // Wrap then/else-blocks - wrapper, endargs, err := doc.WrapUntilTag("else", "endifnotequal") - if err != nil { - return nil, err - } - ifnotequalNode.thenWrapper = wrapper - - if endargs.Count() > 0 { - return nil, endargs.Error("Arguments not allowed here.", nil) - } - - if wrapper.Endtag == "else" { - // if there's an else in the if-statement, we need the else-Block as well - wrapper, endargs, err = doc.WrapUntilTag("endifnotequal") - if err != nil { - return nil, err - } - ifnotequalNode.elseWrapper = wrapper - - if endargs.Count() > 0 { - return nil, endargs.Error("Arguments not allowed here.", nil) - } - } - - return ifnotequalNode, nil -} - -func init() { - RegisterTag("ifnotequal", tagIfNotEqualParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_import.go b/vendor/github.com/flosch/pongo2/tags_import.go deleted file mode 100644 index 7e0d6a01..00000000 --- a/vendor/github.com/flosch/pongo2/tags_import.go +++ /dev/null @@ -1,84 +0,0 @@ -package pongo2 - -import ( - "fmt" -) - -type tagImportNode struct { - position *Token - filename string - macros map[string]*tagMacroNode // alias/name -> macro instance -} - -func (node *tagImportNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - for name, macro := range node.macros { - func(name string, macro *tagMacroNode) { - ctx.Private[name] = func(args ...*Value) *Value { - return macro.call(ctx, args...) 
- } - }(name, macro) - } - return nil -} - -func tagImportParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - importNode := &tagImportNode{ - position: start, - macros: make(map[string]*tagMacroNode), - } - - filenameToken := arguments.MatchType(TokenString) - if filenameToken == nil { - return nil, arguments.Error("Import-tag needs a filename as string.", nil) - } - - importNode.filename = doc.template.set.resolveFilename(doc.template, filenameToken.Val) - - if arguments.Remaining() == 0 { - return nil, arguments.Error("You must at least specify one macro to import.", nil) - } - - // Compile the given template - tpl, err := doc.template.set.FromFile(importNode.filename) - if err != nil { - return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, start) - } - - for arguments.Remaining() > 0 { - macroNameToken := arguments.MatchType(TokenIdentifier) - if macroNameToken == nil { - return nil, arguments.Error("Expected macro name (identifier).", nil) - } - - asName := macroNameToken.Val - if arguments.Match(TokenKeyword, "as") != nil { - aliasToken := arguments.MatchType(TokenIdentifier) - if aliasToken == nil { - return nil, arguments.Error("Expected macro alias name (identifier).", nil) - } - asName = aliasToken.Val - } - - macroInstance, has := tpl.exportedMacros[macroNameToken.Val] - if !has { - return nil, arguments.Error(fmt.Sprintf("Macro '%s' not found (or not exported) in '%s'.", macroNameToken.Val, - importNode.filename), macroNameToken) - } - - importNode.macros[asName] = macroInstance - - if arguments.Remaining() == 0 { - break - } - - if arguments.Match(TokenSymbol, ",") == nil { - return nil, arguments.Error("Expected ','.", nil) - } - } - - return importNode, nil -} - -func init() { - RegisterTag("import", tagImportParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_include.go b/vendor/github.com/flosch/pongo2/tags_include.go deleted file mode 100644 index 6d619fda..00000000 --- 
a/vendor/github.com/flosch/pongo2/tags_include.go +++ /dev/null @@ -1,146 +0,0 @@ -package pongo2 - -type tagIncludeNode struct { - tpl *Template - filenameEvaluator IEvaluator - lazy bool - only bool - filename string - withPairs map[string]IEvaluator - ifExists bool -} - -func (node *tagIncludeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - // Building the context for the template - includeCtx := make(Context) - - // Fill the context with all data from the parent - if !node.only { - includeCtx.Update(ctx.Public) - includeCtx.Update(ctx.Private) - } - - // Put all custom with-pairs into the context - for key, value := range node.withPairs { - val, err := value.Evaluate(ctx) - if err != nil { - return err - } - includeCtx[key] = val - } - - // Execute the template - if node.lazy { - // Evaluate the filename - filename, err := node.filenameEvaluator.Evaluate(ctx) - if err != nil { - return err - } - - if filename.String() == "" { - return ctx.Error("Filename for 'include'-tag evaluated to an empty string.", nil) - } - - // Get include-filename - includedFilename := ctx.template.set.resolveFilename(ctx.template, filename.String()) - - includedTpl, err2 := ctx.template.set.FromFile(includedFilename) - if err2 != nil { - // if this is ReadFile error, and "if_exists" flag is enabled - if node.ifExists && err2.(*Error).Sender == "fromfile" { - return nil - } - return err2.(*Error) - } - err2 = includedTpl.ExecuteWriter(includeCtx, writer) - if err2 != nil { - return err2.(*Error) - } - return nil - } - // Template is already parsed with static filename - err := node.tpl.ExecuteWriter(includeCtx, writer) - if err != nil { - return err.(*Error) - } - return nil -} - -type tagIncludeEmptyNode struct{} - -func (node *tagIncludeEmptyNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - return nil -} - -func tagIncludeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - includeNode := &tagIncludeNode{ - withPairs: 
make(map[string]IEvaluator), - } - - if filenameToken := arguments.MatchType(TokenString); filenameToken != nil { - // prepared, static template - - // "if_exists" flag - ifExists := arguments.Match(TokenIdentifier, "if_exists") != nil - - // Get include-filename - includedFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val) - - // Parse the parent - includeNode.filename = includedFilename - includedTpl, err := doc.template.set.FromFile(includedFilename) - if err != nil { - // if this is ReadFile error, and "if_exists" token presents we should create and empty node - if err.(*Error).Sender == "fromfile" && ifExists { - return &tagIncludeEmptyNode{}, nil - } - return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, filenameToken) - } - includeNode.tpl = includedTpl - } else { - // No String, then the user wants to use lazy-evaluation (slower, but possible) - filenameEvaluator, err := arguments.ParseExpression() - if err != nil { - return nil, err.updateFromTokenIfNeeded(doc.template, filenameToken) - } - includeNode.filenameEvaluator = filenameEvaluator - includeNode.lazy = true - includeNode.ifExists = arguments.Match(TokenIdentifier, "if_exists") != nil // "if_exists" flag - } - - // After having parsed the filename we're gonna parse the with+only options - if arguments.Match(TokenIdentifier, "with") != nil { - for arguments.Remaining() > 0 { - // We have at least one key=expr pair (because of starting "with") - keyToken := arguments.MatchType(TokenIdentifier) - if keyToken == nil { - return nil, arguments.Error("Expected an identifier", nil) - } - if arguments.Match(TokenSymbol, "=") == nil { - return nil, arguments.Error("Expected '='.", nil) - } - valueExpr, err := arguments.ParseExpression() - if err != nil { - return nil, err.updateFromTokenIfNeeded(doc.template, keyToken) - } - - includeNode.withPairs[keyToken.Val] = valueExpr - - // Only? 
- if arguments.Match(TokenIdentifier, "only") != nil { - includeNode.only = true - break // stop parsing arguments because it's the last option - } - } - } - - if arguments.Remaining() > 0 { - return nil, arguments.Error("Malformed 'include'-tag arguments.", nil) - } - - return includeNode, nil -} - -func init() { - RegisterTag("include", tagIncludeParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_lorem.go b/vendor/github.com/flosch/pongo2/tags_lorem.go deleted file mode 100644 index 7794f6c1..00000000 --- a/vendor/github.com/flosch/pongo2/tags_lorem.go +++ /dev/null @@ -1,132 +0,0 @@ -package pongo2 - -import ( - "fmt" - "math/rand" - "strings" - "time" -) - -var ( - tagLoremParagraphs = strings.Split(tagLoremText, "\n") - tagLoremWords = strings.Fields(tagLoremText) -) - -type tagLoremNode struct { - position *Token - count int // number of paragraphs - method string // w = words, p = HTML paragraphs, b = plain-text (default is b) - random bool // does not use the default paragraph "Lorem ipsum dolor sit amet, ..." 
-} - -func (node *tagLoremNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - switch node.method { - case "b": - if node.random { - for i := 0; i < node.count; i++ { - if i > 0 { - writer.WriteString("\n") - } - par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))] - writer.WriteString(par) - } - } else { - for i := 0; i < node.count; i++ { - if i > 0 { - writer.WriteString("\n") - } - par := tagLoremParagraphs[i%len(tagLoremParagraphs)] - writer.WriteString(par) - } - } - case "w": - if node.random { - for i := 0; i < node.count; i++ { - if i > 0 { - writer.WriteString(" ") - } - word := tagLoremWords[rand.Intn(len(tagLoremWords))] - writer.WriteString(word) - } - } else { - for i := 0; i < node.count; i++ { - if i > 0 { - writer.WriteString(" ") - } - word := tagLoremWords[i%len(tagLoremWords)] - writer.WriteString(word) - } - } - case "p": - if node.random { - for i := 0; i < node.count; i++ { - if i > 0 { - writer.WriteString("\n") - } - writer.WriteString("

") - par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))] - writer.WriteString(par) - writer.WriteString("

") - } - } else { - for i := 0; i < node.count; i++ { - if i > 0 { - writer.WriteString("\n") - } - writer.WriteString("

") - par := tagLoremParagraphs[i%len(tagLoremParagraphs)] - writer.WriteString(par) - writer.WriteString("

") - - } - } - default: - return ctx.OrigError(fmt.Errorf("unsupported method: %s", node.method), nil) - } - - return nil -} - -func tagLoremParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - loremNode := &tagLoremNode{ - position: start, - count: 1, - method: "b", - } - - if countToken := arguments.MatchType(TokenNumber); countToken != nil { - loremNode.count = AsValue(countToken.Val).Integer() - } - - if methodToken := arguments.MatchType(TokenIdentifier); methodToken != nil { - if methodToken.Val != "w" && methodToken.Val != "p" && methodToken.Val != "b" { - return nil, arguments.Error("lorem-method must be either 'w', 'p' or 'b'.", nil) - } - - loremNode.method = methodToken.Val - } - - if arguments.MatchOne(TokenIdentifier, "random") != nil { - loremNode.random = true - } - - if arguments.Remaining() > 0 { - return nil, arguments.Error("Malformed lorem-tag arguments.", nil) - } - - return loremNode, nil -} - -func init() { - rand.Seed(time.Now().Unix()) - - RegisterTag("lorem", tagLoremParser) -} - -const tagLoremText = `Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. -Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. 
-Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. -Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. -Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis. -At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, At accusam aliquyam diam diam dolore dolores duo eirmod eos erat, et nonumy sed tempor et et invidunt justo labore Stet clita ea et gubergren, kasd magna no rebum. sanctus sea sed takimata ut vero voluptua. est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat. -Consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. 
Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.` diff --git a/vendor/github.com/flosch/pongo2/tags_macro.go b/vendor/github.com/flosch/pongo2/tags_macro.go deleted file mode 100644 index dd3e0bf4..00000000 --- a/vendor/github.com/flosch/pongo2/tags_macro.go +++ /dev/null @@ -1,149 +0,0 @@ -package pongo2 - -import ( - "bytes" - "fmt" -) - -type tagMacroNode struct { - position *Token - name string - argsOrder []string - args map[string]IEvaluator - exported bool - - wrapper *NodeWrapper -} - -func (node *tagMacroNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - ctx.Private[node.name] = func(args ...*Value) *Value { - return node.call(ctx, args...) - } - - return nil -} - -func (node *tagMacroNode) call(ctx *ExecutionContext, args ...*Value) *Value { - argsCtx := make(Context) - - for k, v := range node.args { - if v == nil { - // User did not provided a default value - argsCtx[k] = nil - } else { - // Evaluate the default value - valueExpr, err := v.Evaluate(ctx) - if err != nil { - ctx.Logf(err.Error()) - return AsSafeValue(err.Error()) - } - - argsCtx[k] = valueExpr - } - } - - if len(args) > len(node.argsOrder) { - // Too many arguments, we're ignoring them and just logging into debug mode. 
- err := ctx.Error(fmt.Sprintf("Macro '%s' called with too many arguments (%d instead of %d).", - node.name, len(args), len(node.argsOrder)), nil).updateFromTokenIfNeeded(ctx.template, node.position) - - ctx.Logf(err.Error()) // TODO: This is a workaround, because the error is not returned yet to the Execution()-methods - return AsSafeValue(err.Error()) - } - - // Make a context for the macro execution - macroCtx := NewChildExecutionContext(ctx) - - // Register all arguments in the private context - macroCtx.Private.Update(argsCtx) - - for idx, argValue := range args { - macroCtx.Private[node.argsOrder[idx]] = argValue.Interface() - } - - var b bytes.Buffer - err := node.wrapper.Execute(macroCtx, &b) - if err != nil { - return AsSafeValue(err.updateFromTokenIfNeeded(ctx.template, node.position).Error()) - } - - return AsSafeValue(b.String()) -} - -func tagMacroParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - macroNode := &tagMacroNode{ - position: start, - args: make(map[string]IEvaluator), - } - - nameToken := arguments.MatchType(TokenIdentifier) - if nameToken == nil { - return nil, arguments.Error("Macro-tag needs at least an identifier as name.", nil) - } - macroNode.name = nameToken.Val - - if arguments.MatchOne(TokenSymbol, "(") == nil { - return nil, arguments.Error("Expected '('.", nil) - } - - for arguments.Match(TokenSymbol, ")") == nil { - argNameToken := arguments.MatchType(TokenIdentifier) - if argNameToken == nil { - return nil, arguments.Error("Expected argument name as identifier.", nil) - } - macroNode.argsOrder = append(macroNode.argsOrder, argNameToken.Val) - - if arguments.Match(TokenSymbol, "=") != nil { - // Default expression follows - argDefaultExpr, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - macroNode.args[argNameToken.Val] = argDefaultExpr - } else { - // No default expression - macroNode.args[argNameToken.Val] = nil - } - - if arguments.Match(TokenSymbol, ")") != nil { - break - 
} - if arguments.Match(TokenSymbol, ",") == nil { - return nil, arguments.Error("Expected ',' or ')'.", nil) - } - } - - if arguments.Match(TokenKeyword, "export") != nil { - macroNode.exported = true - } - - if arguments.Remaining() > 0 { - return nil, arguments.Error("Malformed macro-tag.", nil) - } - - // Body wrapping - wrapper, endargs, err := doc.WrapUntilTag("endmacro") - if err != nil { - return nil, err - } - macroNode.wrapper = wrapper - - if endargs.Count() > 0 { - return nil, endargs.Error("Arguments not allowed here.", nil) - } - - if macroNode.exported { - // Now register the macro if it wants to be exported - _, has := doc.template.exportedMacros[macroNode.name] - if has { - return nil, doc.Error(fmt.Sprintf("another macro with name '%s' already exported", macroNode.name), start) - } - doc.template.exportedMacros[macroNode.name] = macroNode - } - - return macroNode, nil -} - -func init() { - RegisterTag("macro", tagMacroParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_now.go b/vendor/github.com/flosch/pongo2/tags_now.go deleted file mode 100644 index d9fa4a37..00000000 --- a/vendor/github.com/flosch/pongo2/tags_now.go +++ /dev/null @@ -1,50 +0,0 @@ -package pongo2 - -import ( - "time" -) - -type tagNowNode struct { - position *Token - format string - fake bool -} - -func (node *tagNowNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - var t time.Time - if node.fake { - t = time.Date(2014, time.February, 05, 18, 31, 45, 00, time.UTC) - } else { - t = time.Now() - } - - writer.WriteString(t.Format(node.format)) - - return nil -} - -func tagNowParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - nowNode := &tagNowNode{ - position: start, - } - - formatToken := arguments.MatchType(TokenString) - if formatToken == nil { - return nil, arguments.Error("Expected a format string.", nil) - } - nowNode.format = formatToken.Val - - if arguments.MatchOne(TokenIdentifier, "fake") != nil { - nowNode.fake = true 
- } - - if arguments.Remaining() > 0 { - return nil, arguments.Error("Malformed now-tag arguments.", nil) - } - - return nowNode, nil -} - -func init() { - RegisterTag("now", tagNowParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_set.go b/vendor/github.com/flosch/pongo2/tags_set.go deleted file mode 100644 index be121c12..00000000 --- a/vendor/github.com/flosch/pongo2/tags_set.go +++ /dev/null @@ -1,50 +0,0 @@ -package pongo2 - -type tagSetNode struct { - name string - expression IEvaluator -} - -func (node *tagSetNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - // Evaluate expression - value, err := node.expression.Evaluate(ctx) - if err != nil { - return err - } - - ctx.Private[node.name] = value - return nil -} - -func tagSetParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - node := &tagSetNode{} - - // Parse variable name - typeToken := arguments.MatchType(TokenIdentifier) - if typeToken == nil { - return nil, arguments.Error("Expected an identifier.", nil) - } - node.name = typeToken.Val - - if arguments.Match(TokenSymbol, "=") == nil { - return nil, arguments.Error("Expected '='.", nil) - } - - // Variable expression - keyExpression, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - node.expression = keyExpression - - // Remaining arguments - if arguments.Remaining() > 0 { - return nil, arguments.Error("Malformed 'set'-tag arguments.", nil) - } - - return node, nil -} - -func init() { - RegisterTag("set", tagSetParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_spaceless.go b/vendor/github.com/flosch/pongo2/tags_spaceless.go deleted file mode 100644 index 4fa851ba..00000000 --- a/vendor/github.com/flosch/pongo2/tags_spaceless.go +++ /dev/null @@ -1,54 +0,0 @@ -package pongo2 - -import ( - "bytes" - "regexp" -) - -type tagSpacelessNode struct { - wrapper *NodeWrapper -} - -var tagSpacelessRegexp = regexp.MustCompile(`(?U:(<.*>))([\t\n\v\f\r ]+)(?U:(<.*>))`) - -func 
(node *tagSpacelessNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - b := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB - - err := node.wrapper.Execute(ctx, b) - if err != nil { - return err - } - - s := b.String() - // Repeat this recursively - changed := true - for changed { - s2 := tagSpacelessRegexp.ReplaceAllString(s, "$1$3") - changed = s != s2 - s = s2 - } - - writer.WriteString(s) - - return nil -} - -func tagSpacelessParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - spacelessNode := &tagSpacelessNode{} - - wrapper, _, err := doc.WrapUntilTag("endspaceless") - if err != nil { - return nil, err - } - spacelessNode.wrapper = wrapper - - if arguments.Remaining() > 0 { - return nil, arguments.Error("Malformed spaceless-tag arguments.", nil) - } - - return spacelessNode, nil -} - -func init() { - RegisterTag("spaceless", tagSpacelessParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_ssi.go b/vendor/github.com/flosch/pongo2/tags_ssi.go deleted file mode 100644 index c33858d5..00000000 --- a/vendor/github.com/flosch/pongo2/tags_ssi.go +++ /dev/null @@ -1,68 +0,0 @@ -package pongo2 - -import ( - "io/ioutil" -) - -type tagSSINode struct { - filename string - content string - template *Template -} - -func (node *tagSSINode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - if node.template != nil { - // Execute the template within the current context - includeCtx := make(Context) - includeCtx.Update(ctx.Public) - includeCtx.Update(ctx.Private) - - err := node.template.execute(includeCtx, writer) - if err != nil { - return err.(*Error) - } - } else { - // Just print out the content - writer.WriteString(node.content) - } - return nil -} - -func tagSSIParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - SSINode := &tagSSINode{} - - if fileToken := arguments.MatchType(TokenString); fileToken != nil { - SSINode.filename = fileToken.Val - - if arguments.Match(TokenIdentifier, 
"parsed") != nil { - // parsed - temporaryTpl, err := doc.template.set.FromFile(doc.template.set.resolveFilename(doc.template, fileToken.Val)) - if err != nil { - return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, fileToken) - } - SSINode.template = temporaryTpl - } else { - // plaintext - buf, err := ioutil.ReadFile(doc.template.set.resolveFilename(doc.template, fileToken.Val)) - if err != nil { - return nil, (&Error{ - Sender: "tag:ssi", - OrigError: err, - }).updateFromTokenIfNeeded(doc.template, fileToken) - } - SSINode.content = string(buf) - } - } else { - return nil, arguments.Error("First argument must be a string.", nil) - } - - if arguments.Remaining() > 0 { - return nil, arguments.Error("Malformed SSI-tag argument.", nil) - } - - return SSINode, nil -} - -func init() { - RegisterTag("ssi", tagSSIParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_templatetag.go b/vendor/github.com/flosch/pongo2/tags_templatetag.go deleted file mode 100644 index 164b4dc3..00000000 --- a/vendor/github.com/flosch/pongo2/tags_templatetag.go +++ /dev/null @@ -1,45 +0,0 @@ -package pongo2 - -type tagTemplateTagNode struct { - content string -} - -var templateTagMapping = map[string]string{ - "openblock": "{%", - "closeblock": "%}", - "openvariable": "{{", - "closevariable": "}}", - "openbrace": "{", - "closebrace": "}", - "opencomment": "{#", - "closecomment": "#}", -} - -func (node *tagTemplateTagNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - writer.WriteString(node.content) - return nil -} - -func tagTemplateTagParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - ttNode := &tagTemplateTagNode{} - - if argToken := arguments.MatchType(TokenIdentifier); argToken != nil { - output, found := templateTagMapping[argToken.Val] - if !found { - return nil, arguments.Error("Argument not found", argToken) - } - ttNode.content = output - } else { - return nil, arguments.Error("Identifier expected.", nil) - } - - if 
arguments.Remaining() > 0 { - return nil, arguments.Error("Malformed templatetag-tag argument.", nil) - } - - return ttNode, nil -} - -func init() { - RegisterTag("templatetag", tagTemplateTagParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_widthratio.go b/vendor/github.com/flosch/pongo2/tags_widthratio.go deleted file mode 100644 index 70c9c3e8..00000000 --- a/vendor/github.com/flosch/pongo2/tags_widthratio.go +++ /dev/null @@ -1,83 +0,0 @@ -package pongo2 - -import ( - "fmt" - "math" -) - -type tagWidthratioNode struct { - position *Token - current, max IEvaluator - width IEvaluator - ctxName string -} - -func (node *tagWidthratioNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - current, err := node.current.Evaluate(ctx) - if err != nil { - return err - } - - max, err := node.max.Evaluate(ctx) - if err != nil { - return err - } - - width, err := node.width.Evaluate(ctx) - if err != nil { - return err - } - - value := int(math.Ceil(current.Float()/max.Float()*width.Float() + 0.5)) - - if node.ctxName == "" { - writer.WriteString(fmt.Sprintf("%d", value)) - } else { - ctx.Private[node.ctxName] = value - } - - return nil -} - -func tagWidthratioParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - widthratioNode := &tagWidthratioNode{ - position: start, - } - - current, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - widthratioNode.current = current - - max, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - widthratioNode.max = max - - width, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - widthratioNode.width = width - - if arguments.MatchOne(TokenKeyword, "as") != nil { - // Name follows - nameToken := arguments.MatchType(TokenIdentifier) - if nameToken == nil { - return nil, arguments.Error("Expected name (identifier).", nil) - } - widthratioNode.ctxName = nameToken.Val - } - - if arguments.Remaining() > 0 { - return nil, 
arguments.Error("Malformed widthratio-tag arguments.", nil) - } - - return widthratioNode, nil -} - -func init() { - RegisterTag("widthratio", tagWidthratioParser) -} diff --git a/vendor/github.com/flosch/pongo2/tags_with.go b/vendor/github.com/flosch/pongo2/tags_with.go deleted file mode 100644 index 32b3c1c4..00000000 --- a/vendor/github.com/flosch/pongo2/tags_with.go +++ /dev/null @@ -1,88 +0,0 @@ -package pongo2 - -type tagWithNode struct { - withPairs map[string]IEvaluator - wrapper *NodeWrapper -} - -func (node *tagWithNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - //new context for block - withctx := NewChildExecutionContext(ctx) - - // Put all custom with-pairs into the context - for key, value := range node.withPairs { - val, err := value.Evaluate(ctx) - if err != nil { - return err - } - withctx.Private[key] = val - } - - return node.wrapper.Execute(withctx, writer) -} - -func tagWithParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) { - withNode := &tagWithNode{ - withPairs: make(map[string]IEvaluator), - } - - if arguments.Count() == 0 { - return nil, arguments.Error("Tag 'with' requires at least one argument.", nil) - } - - wrapper, endargs, err := doc.WrapUntilTag("endwith") - if err != nil { - return nil, err - } - withNode.wrapper = wrapper - - if endargs.Count() > 0 { - return nil, endargs.Error("Arguments not allowed here.", nil) - } - - // Scan through all arguments to see which style the user uses (old or new style). - // If we find any "as" keyword we will enforce old style; otherwise we will use new style. 
- oldStyle := false // by default we're using the new_style - for i := 0; i < arguments.Count(); i++ { - if arguments.PeekN(i, TokenKeyword, "as") != nil { - oldStyle = true - break - } - } - - for arguments.Remaining() > 0 { - if oldStyle { - valueExpr, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - if arguments.Match(TokenKeyword, "as") == nil { - return nil, arguments.Error("Expected 'as' keyword.", nil) - } - keyToken := arguments.MatchType(TokenIdentifier) - if keyToken == nil { - return nil, arguments.Error("Expected an identifier", nil) - } - withNode.withPairs[keyToken.Val] = valueExpr - } else { - keyToken := arguments.MatchType(TokenIdentifier) - if keyToken == nil { - return nil, arguments.Error("Expected an identifier", nil) - } - if arguments.Match(TokenSymbol, "=") == nil { - return nil, arguments.Error("Expected '='.", nil) - } - valueExpr, err := arguments.ParseExpression() - if err != nil { - return nil, err - } - withNode.withPairs[keyToken.Val] = valueExpr - } - } - - return withNode, nil -} - -func init() { - RegisterTag("with", tagWithParser) -} diff --git a/vendor/github.com/flosch/pongo2/template.go b/vendor/github.com/flosch/pongo2/template.go deleted file mode 100644 index 47666c94..00000000 --- a/vendor/github.com/flosch/pongo2/template.go +++ /dev/null @@ -1,276 +0,0 @@ -package pongo2 - -import ( - "bytes" - "fmt" - "io" - "strings" -) - -type TemplateWriter interface { - io.Writer - WriteString(string) (int, error) -} - -type templateWriter struct { - w io.Writer -} - -func (tw *templateWriter) WriteString(s string) (int, error) { - return tw.w.Write([]byte(s)) -} - -func (tw *templateWriter) Write(b []byte) (int, error) { - return tw.w.Write(b) -} - -type Template struct { - set *TemplateSet - - // Input - isTplString bool - name string - tpl string - size int - - // Calculation - tokens []*Token - parser *Parser - - // first come, first serve (it's important to not override existing entries in here) - 
level int - parent *Template - child *Template - blocks map[string]*NodeWrapper - exportedMacros map[string]*tagMacroNode - - // Output - root *nodeDocument - - // Options allow you to change the behavior of template-engine. - // You can change the options before calling the Execute method. - Options *Options -} - -func newTemplateString(set *TemplateSet, tpl []byte) (*Template, error) { - return newTemplate(set, "", true, tpl) -} - -func newTemplate(set *TemplateSet, name string, isTplString bool, tpl []byte) (*Template, error) { - strTpl := string(tpl) - - // Create the template - t := &Template{ - set: set, - isTplString: isTplString, - name: name, - tpl: strTpl, - size: len(strTpl), - blocks: make(map[string]*NodeWrapper), - exportedMacros: make(map[string]*tagMacroNode), - Options: newOptions(), - } - // Copy all settings from another Options. - t.Options.Update(set.Options) - - // Tokenize it - tokens, err := lex(name, strTpl) - if err != nil { - return nil, err - } - t.tokens = tokens - - // For debugging purposes, show all tokens: - /*for i, t := range tokens { - fmt.Printf("%3d. %s\n", i, t) - }*/ - - // Parse it - err = t.parse() - if err != nil { - return nil, err - } - - return t, nil -} - -func (tpl *Template) newContextForExecution(context Context) (*Template, *ExecutionContext, error) { - if tpl.Options.TrimBlocks || tpl.Options.LStripBlocks { - // Issue #94 https://github.com/flosch/pongo2/issues/94 - // If an application configures pongo2 template to trim_blocks, - // the first newline after a template tag is removed automatically (like in PHP). 
- prev := &Token{ - Typ: TokenHTML, - Val: "\n", - } - - for _, t := range tpl.tokens { - if tpl.Options.LStripBlocks { - if prev.Typ == TokenHTML && t.Typ != TokenHTML && t.Val == "{%" { - prev.Val = strings.TrimRight(prev.Val, "\t ") - } - } - - if tpl.Options.TrimBlocks { - if prev.Typ != TokenHTML && t.Typ == TokenHTML && prev.Val == "%}" { - if len(t.Val) > 0 && t.Val[0] == '\n' { - t.Val = t.Val[1:len(t.Val)] - } - } - } - - prev = t - } - } - - // Determine the parent to be executed (for template inheritance) - parent := tpl - for parent.parent != nil { - parent = parent.parent - } - - // Create context if none is given - newContext := make(Context) - newContext.Update(tpl.set.Globals) - - if context != nil { - newContext.Update(context) - - if len(newContext) > 0 { - // Check for context name syntax - err := newContext.checkForValidIdentifiers() - if err != nil { - return parent, nil, err - } - - // Check for clashes with macro names - for k := range newContext { - _, has := tpl.exportedMacros[k] - if has { - return parent, nil, &Error{ - Filename: tpl.name, - Sender: "execution", - OrigError: fmt.Errorf("context key name '%s' clashes with macro '%s'", k, k), - } - } - } - } - } - - // Create operational context - ctx := newExecutionContext(parent, newContext) - - return parent, ctx, nil -} - -func (tpl *Template) execute(context Context, writer TemplateWriter) error { - parent, ctx, err := tpl.newContextForExecution(context) - if err != nil { - return err - } - - // Run the selected document - if err := parent.root.Execute(ctx, writer); err != nil { - return err - } - - return nil -} - -func (tpl *Template) newTemplateWriterAndExecute(context Context, writer io.Writer) error { - return tpl.execute(context, &templateWriter{w: writer}) -} - -func (tpl *Template) newBufferAndExecute(context Context) (*bytes.Buffer, error) { - // Create output buffer - // We assume that the rendered template will be 30% larger - buffer := bytes.NewBuffer(make([]byte, 0, 
int(float64(tpl.size)*1.3))) - if err := tpl.execute(context, buffer); err != nil { - return nil, err - } - return buffer, nil -} - -// Executes the template with the given context and writes to writer (io.Writer) -// on success. Context can be nil. Nothing is written on error; instead the error -// is being returned. -func (tpl *Template) ExecuteWriter(context Context, writer io.Writer) error { - buf, err := tpl.newBufferAndExecute(context) - if err != nil { - return err - } - _, err = buf.WriteTo(writer) - if err != nil { - return err - } - return nil -} - -// Same as ExecuteWriter. The only difference between both functions is that -// this function might already have written parts of the generated template in the -// case of an execution error because there's no intermediate buffer involved for -// performance reasons. This is handy if you need high performance template -// generation or if you want to manage your own pool of buffers. -func (tpl *Template) ExecuteWriterUnbuffered(context Context, writer io.Writer) error { - return tpl.newTemplateWriterAndExecute(context, writer) -} - -// Executes the template and returns the rendered template as a []byte -func (tpl *Template) ExecuteBytes(context Context) ([]byte, error) { - // Execute template - buffer, err := tpl.newBufferAndExecute(context) - if err != nil { - return nil, err - } - return buffer.Bytes(), nil -} - -// Executes the template and returns the rendered template as a string -func (tpl *Template) Execute(context Context) (string, error) { - // Execute template - buffer, err := tpl.newBufferAndExecute(context) - if err != nil { - return "", err - } - - return buffer.String(), nil - -} - -func (tpl *Template) ExecuteBlocks(context Context, blocks []string) (map[string]string, error) { - var parents []*Template - result := make(map[string]string) - - parent := tpl - for parent != nil { - parents = append(parents, parent) - parent = parent.parent - } - - for _, t := range parents { - buffer := 
bytes.NewBuffer(make([]byte, 0, int(float64(t.size)*1.3))) - _, ctx, err := t.newContextForExecution(context) - if err != nil { - return nil, err - } - for _, blockName := range blocks { - if _, ok := result[blockName]; ok { - continue - } - if blockWrapper, ok := t.blocks[blockName]; ok { - bErr := blockWrapper.Execute(ctx, buffer) - if bErr != nil { - return nil, bErr - } - result[blockName] = buffer.String() - buffer.Reset() - } - } - // We have found all blocks - if len(blocks) == len(result) { - break - } - } - - return result, nil -} diff --git a/vendor/github.com/flosch/pongo2/template_loader.go b/vendor/github.com/flosch/pongo2/template_loader.go deleted file mode 100644 index abd23409..00000000 --- a/vendor/github.com/flosch/pongo2/template_loader.go +++ /dev/null @@ -1,156 +0,0 @@ -package pongo2 - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" -) - -// LocalFilesystemLoader represents a local filesystem loader with basic -// BaseDirectory capabilities. The access to the local filesystem is unrestricted. -type LocalFilesystemLoader struct { - baseDir string -} - -// MustNewLocalFileSystemLoader creates a new LocalFilesystemLoader instance -// and panics if there's any error during instantiation. The parameters -// are the same like NewLocalFileSystemLoader. -func MustNewLocalFileSystemLoader(baseDir string) *LocalFilesystemLoader { - fs, err := NewLocalFileSystemLoader(baseDir) - if err != nil { - log.Panic(err) - } - return fs -} - -// NewLocalFileSystemLoader creates a new LocalFilesystemLoader and allows -// templatesto be loaded from disk (unrestricted). If any base directory -// is given (or being set using SetBaseDir), this base directory is being used -// for path calculation in template inclusions/imports. Otherwise the path -// is calculated based relatively to the including template's path. 
-func NewLocalFileSystemLoader(baseDir string) (*LocalFilesystemLoader, error) { - fs := &LocalFilesystemLoader{} - if baseDir != "" { - if err := fs.SetBaseDir(baseDir); err != nil { - return nil, err - } - } - return fs, nil -} - -// SetBaseDir sets the template's base directory. This directory will -// be used for any relative path in filters, tags and From*-functions to determine -// your template. See the comment for NewLocalFileSystemLoader as well. -func (fs *LocalFilesystemLoader) SetBaseDir(path string) error { - // Make the path absolute - if !filepath.IsAbs(path) { - abs, err := filepath.Abs(path) - if err != nil { - return err - } - path = abs - } - - // Check for existence - fi, err := os.Stat(path) - if err != nil { - return err - } - if !fi.IsDir() { - return fmt.Errorf("The given path '%s' is not a directory.", path) - } - - fs.baseDir = path - return nil -} - -// Get reads the path's content from your local filesystem. -func (fs *LocalFilesystemLoader) Get(path string) (io.Reader, error) { - buf, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - return bytes.NewReader(buf), nil -} - -// Abs resolves a filename relative to the base directory. Absolute paths are allowed. -// When there's no base dir set, the absolute path to the filename -// will be calculated based on either the provided base directory (which -// might be a path of a template which includes another template) or -// the current working directory. -func (fs *LocalFilesystemLoader) Abs(base, name string) string { - if filepath.IsAbs(name) { - return name - } - - // Our own base dir has always priority; if there's none - // we use the path provided in base. - var err error - if fs.baseDir == "" { - if base == "" { - base, err = os.Getwd() - if err != nil { - panic(err) - } - return filepath.Join(base, name) - } - - return filepath.Join(filepath.Dir(base), name) - } - - return filepath.Join(fs.baseDir, name) -} - -// SandboxedFilesystemLoader is still WIP. 
-type SandboxedFilesystemLoader struct { - *LocalFilesystemLoader -} - -// NewSandboxedFilesystemLoader creates a new sandboxed local file system instance. -func NewSandboxedFilesystemLoader(baseDir string) (*SandboxedFilesystemLoader, error) { - fs, err := NewLocalFileSystemLoader(baseDir) - if err != nil { - return nil, err - } - return &SandboxedFilesystemLoader{ - LocalFilesystemLoader: fs, - }, nil -} - -// Move sandbox to a virtual fs - -/* -if len(set.SandboxDirectories) > 0 { - defer func() { - // Remove any ".." or other crap - resolvedPath = filepath.Clean(resolvedPath) - - // Make the path absolute - absPath, err := filepath.Abs(resolvedPath) - if err != nil { - panic(err) - } - resolvedPath = absPath - - // Check against the sandbox directories (once one pattern matches, we're done and can allow it) - for _, pattern := range set.SandboxDirectories { - matched, err := filepath.Match(pattern, resolvedPath) - if err != nil { - panic("Wrong sandbox directory match pattern (see http://golang.org/pkg/path/filepath/#Match).") - } - if matched { - // OK! - return - } - } - - // No pattern matched, we have to log+deny the request - set.logf("Access attempt outside of the sandbox directories (blocked): '%s'", resolvedPath) - resolvedPath = "" - }() -} -*/ diff --git a/vendor/github.com/flosch/pongo2/template_sets.go b/vendor/github.com/flosch/pongo2/template_sets.go deleted file mode 100644 index 4b1e43da..00000000 --- a/vendor/github.com/flosch/pongo2/template_sets.go +++ /dev/null @@ -1,305 +0,0 @@ -package pongo2 - -import ( - "fmt" - "io" - "io/ioutil" - "log" - "os" - "sync" - - "errors" -) - -// TemplateLoader allows to implement a virtual file system. -type TemplateLoader interface { - // Abs calculates the path to a given template. Whenever a path must be resolved - // due to an import from another template, the base equals the parent template's path. 
- Abs(base, name string) string - - // Get returns an io.Reader where the template's content can be read from. - Get(path string) (io.Reader, error) -} - -// TemplateSet allows you to create your own group of templates with their own -// global context (which is shared among all members of the set) and their own -// configuration. -// It's useful for a separation of different kind of templates -// (e. g. web templates vs. mail templates). -type TemplateSet struct { - name string - loaders []TemplateLoader - - // Globals will be provided to all templates created within this template set - Globals Context - - // If debug is true (default false), ExecutionContext.Logf() will work and output - // to STDOUT. Furthermore, FromCache() won't cache the templates. - // Make sure to synchronize the access to it in case you're changing this - // variable during program execution (and template compilation/execution). - Debug bool - - // Options allow you to change the behavior of template-engine. - // You can change the options before calling the Execute method. - Options *Options - - // Sandbox features - // - Disallow access to specific tags and/or filters (using BanTag() and BanFilter()) - // - // For efficiency reasons you can ban tags/filters only *before* you have - // added your first template to the set (restrictions are statically checked). - // After you added one, it's not possible anymore (for your personal security). - firstTemplateCreated bool - bannedTags map[string]bool - bannedFilters map[string]bool - - // Template cache (for FromCache()) - templateCache map[string]*Template - templateCacheMutex sync.Mutex -} - -// NewSet can be used to create sets with different kind of templates -// (e. g. web from mail templates), with different globals or -// other configurations. 
-func NewSet(name string, loaders ...TemplateLoader) *TemplateSet { - if len(loaders) == 0 { - panic(fmt.Errorf("at least one template loader must be specified")) - } - - return &TemplateSet{ - name: name, - loaders: loaders, - Globals: make(Context), - bannedTags: make(map[string]bool), - bannedFilters: make(map[string]bool), - templateCache: make(map[string]*Template), - Options: newOptions(), - } -} - -func (set *TemplateSet) AddLoader(loaders ...TemplateLoader) { - set.loaders = append(set.loaders, loaders...) -} - -func (set *TemplateSet) resolveFilename(tpl *Template, path string) string { - return set.resolveFilenameForLoader(set.loaders[0], tpl, path) -} - -func (set *TemplateSet) resolveFilenameForLoader(loader TemplateLoader, tpl *Template, path string) string { - name := "" - if tpl != nil && tpl.isTplString { - return path - } - if tpl != nil { - name = tpl.name - } - - return loader.Abs(name, path) -} - -// BanTag bans a specific tag for this template set. See more in the documentation for TemplateSet. -func (set *TemplateSet) BanTag(name string) error { - _, has := tags[name] - if !has { - return fmt.Errorf("tag '%s' not found", name) - } - if set.firstTemplateCreated { - return errors.New("you cannot ban any tags after you've added your first template to your template set") - } - _, has = set.bannedTags[name] - if has { - return fmt.Errorf("tag '%s' is already banned", name) - } - set.bannedTags[name] = true - - return nil -} - -// BanFilter bans a specific filter for this template set. See more in the documentation for TemplateSet. 
-func (set *TemplateSet) BanFilter(name string) error { - _, has := filters[name] - if !has { - return fmt.Errorf("filter '%s' not found", name) - } - if set.firstTemplateCreated { - return errors.New("you cannot ban any filters after you've added your first template to your template set") - } - _, has = set.bannedFilters[name] - if has { - return fmt.Errorf("filter '%s' is already banned", name) - } - set.bannedFilters[name] = true - - return nil -} - -func (set *TemplateSet) resolveTemplate(tpl *Template, path string) (name string, loader TemplateLoader, fd io.Reader, err error) { - // iterate over loaders until we appear to have a valid template - for _, loader = range set.loaders { - name = set.resolveFilenameForLoader(loader, tpl, path) - fd, err = loader.Get(name) - if err == nil { - return - } - } - - return path, nil, nil, fmt.Errorf("unable to resolve template") -} - -// CleanCache cleans the template cache. If filenames is not empty, -// it will remove the template caches of those filenames. -// Or it will empty the whole template cache. It is thread-safe. -func (set *TemplateSet) CleanCache(filenames ...string) { - set.templateCacheMutex.Lock() - defer set.templateCacheMutex.Unlock() - - if len(filenames) == 0 { - set.templateCache = make(map[string]*Template, len(set.templateCache)) - } - - for _, filename := range filenames { - delete(set.templateCache, set.resolveFilename(nil, filename)) - } -} - -// FromCache is a convenient method to cache templates. It is thread-safe -// and will only compile the template associated with a filename once. -// If TemplateSet.Debug is true (for example during development phase), -// FromCache() will not cache the template and instead recompile it on any -// call (to make changes to a template live instantaneously). 
-func (set *TemplateSet) FromCache(filename string) (*Template, error) { - if set.Debug { - // Recompile on any request - return set.FromFile(filename) - } - // Cache the template - cleanedFilename := set.resolveFilename(nil, filename) - - set.templateCacheMutex.Lock() - defer set.templateCacheMutex.Unlock() - - tpl, has := set.templateCache[cleanedFilename] - - // Cache miss - if !has { - tpl, err := set.FromFile(cleanedFilename) - if err != nil { - return nil, err - } - set.templateCache[cleanedFilename] = tpl - return tpl, nil - } - - // Cache hit - return tpl, nil -} - -// FromString loads a template from string and returns a Template instance. -func (set *TemplateSet) FromString(tpl string) (*Template, error) { - set.firstTemplateCreated = true - - return newTemplateString(set, []byte(tpl)) -} - -// FromBytes loads a template from bytes and returns a Template instance. -func (set *TemplateSet) FromBytes(tpl []byte) (*Template, error) { - set.firstTemplateCreated = true - - return newTemplateString(set, tpl) -} - -// FromFile loads a template from a filename and returns a Template instance. -func (set *TemplateSet) FromFile(filename string) (*Template, error) { - set.firstTemplateCreated = true - - _, _, fd, err := set.resolveTemplate(nil, filename) - if err != nil { - return nil, &Error{ - Filename: filename, - Sender: "fromfile", - OrigError: err, - } - } - buf, err := ioutil.ReadAll(fd) - if err != nil { - return nil, &Error{ - Filename: filename, - Sender: "fromfile", - OrigError: err, - } - } - - return newTemplate(set, filename, false, buf) -} - -// RenderTemplateString is a shortcut and renders a template string directly. -func (set *TemplateSet) RenderTemplateString(s string, ctx Context) (string, error) { - set.firstTemplateCreated = true - - tpl := Must(set.FromString(s)) - result, err := tpl.Execute(ctx) - if err != nil { - return "", err - } - return result, nil -} - -// RenderTemplateBytes is a shortcut and renders template bytes directly. 
-func (set *TemplateSet) RenderTemplateBytes(b []byte, ctx Context) (string, error) { - set.firstTemplateCreated = true - - tpl := Must(set.FromBytes(b)) - result, err := tpl.Execute(ctx) - if err != nil { - return "", err - } - return result, nil -} - -// RenderTemplateFile is a shortcut and renders a template file directly. -func (set *TemplateSet) RenderTemplateFile(fn string, ctx Context) (string, error) { - set.firstTemplateCreated = true - - tpl := Must(set.FromFile(fn)) - result, err := tpl.Execute(ctx) - if err != nil { - return "", err - } - return result, nil -} - -func (set *TemplateSet) logf(format string, args ...interface{}) { - if set.Debug { - logger.Printf(fmt.Sprintf("[template set: %s] %s", set.name, format), args...) - } -} - -// Logging function (internally used) -func logf(format string, items ...interface{}) { - if debug { - logger.Printf(format, items...) - } -} - -var ( - debug bool // internal debugging - logger = log.New(os.Stdout, "[pongo2] ", log.LstdFlags|log.Lshortfile) - - // DefaultLoader allows the default un-sandboxed access to the local file - // system and is being used by the DefaultSet. - DefaultLoader = MustNewLocalFileSystemLoader("") - - // DefaultSet is a set created for you for convinience reasons. 
- DefaultSet = NewSet("default", DefaultLoader) - - // Methods on the default set - FromString = DefaultSet.FromString - FromBytes = DefaultSet.FromBytes - FromFile = DefaultSet.FromFile - FromCache = DefaultSet.FromCache - RenderTemplateString = DefaultSet.RenderTemplateString - RenderTemplateFile = DefaultSet.RenderTemplateFile - - // Globals for the default set - Globals = DefaultSet.Globals -) diff --git a/vendor/github.com/flosch/pongo2/value.go b/vendor/github.com/flosch/pongo2/value.go deleted file mode 100644 index 8b49adb7..00000000 --- a/vendor/github.com/flosch/pongo2/value.go +++ /dev/null @@ -1,540 +0,0 @@ -package pongo2 - -import ( - "fmt" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type Value struct { - val reflect.Value - safe bool // used to indicate whether a Value needs explicit escaping in the template -} - -// AsValue converts any given value to a pongo2.Value -// Usually being used within own functions passed to a template -// through a Context or within filter functions. -// -// Example: -// AsValue("my string") -func AsValue(i interface{}) *Value { - return &Value{ - val: reflect.ValueOf(i), - } -} - -// AsSafeValue works like AsValue, but does not apply the 'escape' filter. 
-func AsSafeValue(i interface{}) *Value { - return &Value{ - val: reflect.ValueOf(i), - safe: true, - } -} - -func (v *Value) getResolvedValue() reflect.Value { - if v.val.IsValid() && v.val.Kind() == reflect.Ptr { - return v.val.Elem() - } - return v.val -} - -// IsString checks whether the underlying value is a string -func (v *Value) IsString() bool { - return v.getResolvedValue().Kind() == reflect.String -} - -// IsBool checks whether the underlying value is a bool -func (v *Value) IsBool() bool { - return v.getResolvedValue().Kind() == reflect.Bool -} - -// IsFloat checks whether the underlying value is a float -func (v *Value) IsFloat() bool { - return v.getResolvedValue().Kind() == reflect.Float32 || - v.getResolvedValue().Kind() == reflect.Float64 -} - -// IsInteger checks whether the underlying value is an integer -func (v *Value) IsInteger() bool { - return v.getResolvedValue().Kind() == reflect.Int || - v.getResolvedValue().Kind() == reflect.Int8 || - v.getResolvedValue().Kind() == reflect.Int16 || - v.getResolvedValue().Kind() == reflect.Int32 || - v.getResolvedValue().Kind() == reflect.Int64 || - v.getResolvedValue().Kind() == reflect.Uint || - v.getResolvedValue().Kind() == reflect.Uint8 || - v.getResolvedValue().Kind() == reflect.Uint16 || - v.getResolvedValue().Kind() == reflect.Uint32 || - v.getResolvedValue().Kind() == reflect.Uint64 -} - -// IsNumber checks whether the underlying value is either an integer -// or a float. -func (v *Value) IsNumber() bool { - return v.IsInteger() || v.IsFloat() -} - -// IsTime checks whether the underlying value is a time.Time. -func (v *Value) IsTime() bool { - _, ok := v.Interface().(time.Time) - return ok -} - -// IsNil checks whether the underlying value is NIL -func (v *Value) IsNil() bool { - //fmt.Printf("%+v\n", v.getResolvedValue().Type().String()) - return !v.getResolvedValue().IsValid() -} - -// String returns a string for the underlying value. 
If this value is not -// of type string, pongo2 tries to convert it. Currently the following -// types for underlying values are supported: -// -// 1. string -// 2. int/uint (any size) -// 3. float (any precision) -// 4. bool -// 5. time.Time -// 6. String() will be called on the underlying value if provided -// -// NIL values will lead to an empty string. Unsupported types are leading -// to their respective type name. -func (v *Value) String() string { - if v.IsNil() { - return "" - } - - switch v.getResolvedValue().Kind() { - case reflect.String: - return v.getResolvedValue().String() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(v.getResolvedValue().Int(), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return strconv.FormatUint(v.getResolvedValue().Uint(), 10) - case reflect.Float32, reflect.Float64: - return fmt.Sprintf("%f", v.getResolvedValue().Float()) - case reflect.Bool: - if v.Bool() { - return "True" - } - return "False" - case reflect.Struct: - if t, ok := v.Interface().(fmt.Stringer); ok { - return t.String() - } - } - - logf("Value.String() not implemented for type: %s\n", v.getResolvedValue().Kind().String()) - return v.getResolvedValue().String() -} - -// Integer returns the underlying value as an integer (converts the underlying -// value, if necessary). If it's not possible to convert the underlying value, -// it will return 0. 
-func (v *Value) Integer() int { - switch v.getResolvedValue().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return int(v.getResolvedValue().Int()) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return int(v.getResolvedValue().Uint()) - case reflect.Float32, reflect.Float64: - return int(v.getResolvedValue().Float()) - case reflect.String: - // Try to convert from string to int (base 10) - f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64) - if err != nil { - return 0 - } - return int(f) - default: - logf("Value.Integer() not available for type: %s\n", v.getResolvedValue().Kind().String()) - return 0 - } -} - -// Float returns the underlying value as a float (converts the underlying -// value, if necessary). If it's not possible to convert the underlying value, -// it will return 0.0. -func (v *Value) Float() float64 { - switch v.getResolvedValue().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.getResolvedValue().Int()) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return float64(v.getResolvedValue().Uint()) - case reflect.Float32, reflect.Float64: - return v.getResolvedValue().Float() - case reflect.String: - // Try to convert from string to float64 (base 10) - f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64) - if err != nil { - return 0.0 - } - return f - default: - logf("Value.Float() not available for type: %s\n", v.getResolvedValue().Kind().String()) - return 0.0 - } -} - -// Bool returns the underlying value as bool. If the value is not bool, false -// will always be returned. If you're looking for true/false-evaluation of the -// underlying value, have a look on the IsTrue()-function. 
-func (v *Value) Bool() bool { - switch v.getResolvedValue().Kind() { - case reflect.Bool: - return v.getResolvedValue().Bool() - default: - logf("Value.Bool() not available for type: %s\n", v.getResolvedValue().Kind().String()) - return false - } -} - -// Time returns the underlying value as time.Time. -// If the underlying value is not a time.Time, it returns the zero value of time.Time. -func (v *Value) Time() time.Time { - tm, ok := v.Interface().(time.Time) - if ok { - return tm - } - return time.Time{} -} - -// IsTrue tries to evaluate the underlying value the Pythonic-way: -// -// Returns TRUE in one the following cases: -// -// * int != 0 -// * uint != 0 -// * float != 0.0 -// * len(array/chan/map/slice/string) > 0 -// * bool == true -// * underlying value is a struct -// -// Otherwise returns always FALSE. -func (v *Value) IsTrue() bool { - switch v.getResolvedValue().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.getResolvedValue().Int() != 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return v.getResolvedValue().Uint() != 0 - case reflect.Float32, reflect.Float64: - return v.getResolvedValue().Float() != 0 - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: - return v.getResolvedValue().Len() > 0 - case reflect.Bool: - return v.getResolvedValue().Bool() - case reflect.Struct: - return true // struct instance is always true - default: - logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String()) - return false - } -} - -// Negate tries to negate the underlying value. It's mainly used for -// the NOT-operator and in conjunction with a call to -// return_value.IsTrue() afterwards. 
-// -// Example: -// AsValue(1).Negate().IsTrue() == false -func (v *Value) Negate() *Value { - switch v.getResolvedValue().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if v.Integer() != 0 { - return AsValue(0) - } - return AsValue(1) - case reflect.Float32, reflect.Float64: - if v.Float() != 0.0 { - return AsValue(float64(0.0)) - } - return AsValue(float64(1.1)) - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: - return AsValue(v.getResolvedValue().Len() == 0) - case reflect.Bool: - return AsValue(!v.getResolvedValue().Bool()) - case reflect.Struct: - return AsValue(false) - default: - logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String()) - return AsValue(true) - } -} - -// Len returns the length for an array, chan, map, slice or string. -// Otherwise it will return 0. -func (v *Value) Len() int { - switch v.getResolvedValue().Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - return v.getResolvedValue().Len() - case reflect.String: - runes := []rune(v.getResolvedValue().String()) - return len(runes) - default: - logf("Value.Len() not available for type: %s\n", v.getResolvedValue().Kind().String()) - return 0 - } -} - -// Slice slices an array, slice or string. Otherwise it will -// return an empty []int. -func (v *Value) Slice(i, j int) *Value { - switch v.getResolvedValue().Kind() { - case reflect.Array, reflect.Slice: - return AsValue(v.getResolvedValue().Slice(i, j).Interface()) - case reflect.String: - runes := []rune(v.getResolvedValue().String()) - return AsValue(string(runes[i:j])) - default: - logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String()) - return AsValue([]int{}) - } -} - -// Index gets the i-th item of an array, slice or string. Otherwise -// it will return NIL. 
-func (v *Value) Index(i int) *Value { - switch v.getResolvedValue().Kind() { - case reflect.Array, reflect.Slice: - if i >= v.Len() { - return AsValue(nil) - } - return AsValue(v.getResolvedValue().Index(i).Interface()) - case reflect.String: - //return AsValue(v.getResolvedValue().Slice(i, i+1).Interface()) - s := v.getResolvedValue().String() - runes := []rune(s) - if i < len(runes) { - return AsValue(string(runes[i])) - } - return AsValue("") - default: - logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String()) - return AsValue([]int{}) - } -} - -// Contains checks whether the underlying value (which must be of type struct, map, -// string, array or slice) contains of another Value (e. g. used to check -// whether a struct contains of a specific field or a map contains a specific key). -// -// Example: -// AsValue("Hello, World!").Contains(AsValue("World")) == true -func (v *Value) Contains(other *Value) bool { - switch v.getResolvedValue().Kind() { - case reflect.Struct: - fieldValue := v.getResolvedValue().FieldByName(other.String()) - return fieldValue.IsValid() - case reflect.Map: - var mapValue reflect.Value - switch other.Interface().(type) { - case int: - mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue()) - case string: - mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue()) - default: - logf("Value.Contains() does not support lookup type '%s'\n", other.getResolvedValue().Kind().String()) - return false - } - - return mapValue.IsValid() - case reflect.String: - return strings.Contains(v.getResolvedValue().String(), other.String()) - - case reflect.Slice, reflect.Array: - for i := 0; i < v.getResolvedValue().Len(); i++ { - item := v.getResolvedValue().Index(i) - if other.Interface() == item.Interface() { - return true - } - } - return false - - default: - logf("Value.Contains() not available for type: %s\n", v.getResolvedValue().Kind().String()) - return false - } -} - -// CanSlice checks 
whether the underlying value is of type array, slice or string. -// You normally would use CanSlice() before using the Slice() operation. -func (v *Value) CanSlice() bool { - switch v.getResolvedValue().Kind() { - case reflect.Array, reflect.Slice, reflect.String: - return true - } - return false -} - -// Iterate iterates over a map, array, slice or a string. It calls the -// function's first argument for every value with the following arguments: -// -// idx current 0-index -// count total amount of items -// key *Value for the key or item -// value *Value (only for maps, the respective value for a specific key) -// -// If the underlying value has no items or is not one of the types above, -// the empty function (function's second argument) will be called. -func (v *Value) Iterate(fn func(idx, count int, key, value *Value) bool, empty func()) { - v.IterateOrder(fn, empty, false, false) -} - -// IterateOrder behaves like Value.Iterate, but can iterate through an array/slice/string in reverse. Does -// not affect the iteration through a map because maps don't have any particular order. -// However, you can force an order using the `sorted` keyword (and even use `reversed sorted`). 
-func (v *Value) IterateOrder(fn func(idx, count int, key, value *Value) bool, empty func(), reverse bool, sorted bool) { - switch v.getResolvedValue().Kind() { - case reflect.Map: - keys := sortedKeys(v.getResolvedValue().MapKeys()) - if sorted { - if reverse { - sort.Sort(sort.Reverse(keys)) - } else { - sort.Sort(keys) - } - } - keyLen := len(keys) - for idx, key := range keys { - value := v.getResolvedValue().MapIndex(key) - if !fn(idx, keyLen, &Value{val: key}, &Value{val: value}) { - return - } - } - if keyLen == 0 { - empty() - } - return // done - case reflect.Array, reflect.Slice: - var items valuesList - - itemCount := v.getResolvedValue().Len() - for i := 0; i < itemCount; i++ { - items = append(items, &Value{val: v.getResolvedValue().Index(i)}) - } - - if sorted { - if reverse { - sort.Sort(sort.Reverse(items)) - } else { - sort.Sort(items) - } - } else { - if reverse { - for i := 0; i < itemCount/2; i++ { - items[i], items[itemCount-1-i] = items[itemCount-1-i], items[i] - } - } - } - - if len(items) > 0 { - for idx, item := range items { - if !fn(idx, itemCount, item, nil) { - return - } - } - } else { - empty() - } - return // done - case reflect.String: - if sorted { - // TODO(flosch): Handle sorted - panic("TODO: handle sort for type string") - } - - // TODO(flosch): Not utf8-compatible (utf8-decoding necessary) - charCount := v.getResolvedValue().Len() - if charCount > 0 { - if reverse { - for i := charCount - 1; i >= 0; i-- { - if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) { - return - } - } - } else { - for i := 0; i < charCount; i++ { - if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) { - return - } - } - } - } else { - empty() - } - return // done - default: - logf("Value.Iterate() not available for type: %s\n", v.getResolvedValue().Kind().String()) - } - empty() -} - -// Interface gives you access to the underlying value. 
-func (v *Value) Interface() interface{} { - if v.val.IsValid() { - return v.val.Interface() - } - return nil -} - -// EqualValueTo checks whether two values are containing the same value or object. -func (v *Value) EqualValueTo(other *Value) bool { - // comparison of uint with int fails using .Interface()-comparison (see issue #64) - if v.IsInteger() && other.IsInteger() { - return v.Integer() == other.Integer() - } - if v.IsTime() && other.IsTime() { - return v.Time().Equal(other.Time()) - } - return v.Interface() == other.Interface() -} - -type sortedKeys []reflect.Value - -func (sk sortedKeys) Len() int { - return len(sk) -} - -func (sk sortedKeys) Less(i, j int) bool { - vi := &Value{val: sk[i]} - vj := &Value{val: sk[j]} - switch { - case vi.IsInteger() && vj.IsInteger(): - return vi.Integer() < vj.Integer() - case vi.IsFloat() && vj.IsFloat(): - return vi.Float() < vj.Float() - default: - return vi.String() < vj.String() - } -} - -func (sk sortedKeys) Swap(i, j int) { - sk[i], sk[j] = sk[j], sk[i] -} - -type valuesList []*Value - -func (vl valuesList) Len() int { - return len(vl) -} - -func (vl valuesList) Less(i, j int) bool { - vi := vl[i] - vj := vl[j] - switch { - case vi.IsInteger() && vj.IsInteger(): - return vi.Integer() < vj.Integer() - case vi.IsFloat() && vj.IsFloat(): - return vi.Float() < vj.Float() - default: - return vi.String() < vj.String() - } -} - -func (vl valuesList) Swap(i, j int) { - vl[i], vl[j] = vl[j], vl[i] -} diff --git a/vendor/github.com/flosch/pongo2/variable.go b/vendor/github.com/flosch/pongo2/variable.go deleted file mode 100644 index 25e2af40..00000000 --- a/vendor/github.com/flosch/pongo2/variable.go +++ /dev/null @@ -1,693 +0,0 @@ -package pongo2 - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -const ( - varTypeInt = iota - varTypeIdent -) - -var ( - typeOfValuePtr = reflect.TypeOf(new(Value)) - typeOfExecCtxPtr = reflect.TypeOf(new(ExecutionContext)) -) - -type variablePart struct { - typ int - s string - i 
int - - isFunctionCall bool - callingArgs []functionCallArgument // needed for a function call, represents all argument nodes (INode supports nested function calls) -} - -type functionCallArgument interface { - Evaluate(*ExecutionContext) (*Value, *Error) -} - -// TODO: Add location tokens -type stringResolver struct { - locationToken *Token - val string -} - -type intResolver struct { - locationToken *Token - val int -} - -type floatResolver struct { - locationToken *Token - val float64 -} - -type boolResolver struct { - locationToken *Token - val bool -} - -type variableResolver struct { - locationToken *Token - - parts []*variablePart -} - -type nodeFilteredVariable struct { - locationToken *Token - - resolver IEvaluator - filterChain []*filterCall -} - -type nodeVariable struct { - locationToken *Token - expr IEvaluator -} - -type executionCtxEval struct{} - -func (v *nodeFilteredVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - value, err := v.Evaluate(ctx) - if err != nil { - return err - } - writer.WriteString(value.String()) - return nil -} - -func (vr *variableResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - value, err := vr.Evaluate(ctx) - if err != nil { - return err - } - writer.WriteString(value.String()) - return nil -} - -func (s *stringResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - value, err := s.Evaluate(ctx) - if err != nil { - return err - } - writer.WriteString(value.String()) - return nil -} - -func (i *intResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - value, err := i.Evaluate(ctx) - if err != nil { - return err - } - writer.WriteString(value.String()) - return nil -} - -func (f *floatResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - value, err := f.Evaluate(ctx) - if err != nil { - return err - } - writer.WriteString(value.String()) - return nil -} - -func (b *boolResolver) Execute(ctx *ExecutionContext, writer 
TemplateWriter) *Error { - value, err := b.Evaluate(ctx) - if err != nil { - return err - } - writer.WriteString(value.String()) - return nil -} - -func (v *nodeFilteredVariable) GetPositionToken() *Token { - return v.locationToken -} - -func (vr *variableResolver) GetPositionToken() *Token { - return vr.locationToken -} - -func (s *stringResolver) GetPositionToken() *Token { - return s.locationToken -} - -func (i *intResolver) GetPositionToken() *Token { - return i.locationToken -} - -func (f *floatResolver) GetPositionToken() *Token { - return f.locationToken -} - -func (b *boolResolver) GetPositionToken() *Token { - return b.locationToken -} - -func (s *stringResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) { - return AsValue(s.val), nil -} - -func (i *intResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) { - return AsValue(i.val), nil -} - -func (f *floatResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) { - return AsValue(f.val), nil -} - -func (b *boolResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) { - return AsValue(b.val), nil -} - -func (s *stringResolver) FilterApplied(name string) bool { - return false -} - -func (i *intResolver) FilterApplied(name string) bool { - return false -} - -func (f *floatResolver) FilterApplied(name string) bool { - return false -} - -func (b *boolResolver) FilterApplied(name string) bool { - return false -} - -func (nv *nodeVariable) FilterApplied(name string) bool { - return nv.expr.FilterApplied(name) -} - -func (nv *nodeVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error { - value, err := nv.expr.Evaluate(ctx) - if err != nil { - return err - } - - if !nv.expr.FilterApplied("safe") && !value.safe && value.IsString() && ctx.Autoescape { - // apply escape filter - value, err = filters["escape"](value, nil) - if err != nil { - return err - } - } - - writer.WriteString(value.String()) - return nil -} - -func (executionCtxEval) Evaluate(ctx *ExecutionContext) (*Value, 
*Error) { - return AsValue(ctx), nil -} - -func (vr *variableResolver) FilterApplied(name string) bool { - return false -} - -func (vr *variableResolver) String() string { - parts := make([]string, 0, len(vr.parts)) - for _, p := range vr.parts { - switch p.typ { - case varTypeInt: - parts = append(parts, strconv.Itoa(p.i)) - case varTypeIdent: - parts = append(parts, p.s) - default: - panic("unimplemented") - } - } - return strings.Join(parts, ".") -} - -func (vr *variableResolver) resolve(ctx *ExecutionContext) (*Value, error) { - var current reflect.Value - var isSafe bool - - for idx, part := range vr.parts { - if idx == 0 { - // We're looking up the first part of the variable. - // First we're having a look in our private - // context (e. g. information provided by tags, like the forloop) - val, inPrivate := ctx.Private[vr.parts[0].s] - if !inPrivate { - // Nothing found? Then have a final lookup in the public context - val = ctx.Public[vr.parts[0].s] - } - current = reflect.ValueOf(val) // Get the initial value - } else { - // Next parts, resolve it from current - - // Before resolving the pointer, let's see if we have a method to call - // Problem with resolving the pointer is we're changing the receiver - isFunc := false - if part.typ == varTypeIdent { - funcValue := current.MethodByName(part.s) - if funcValue.IsValid() { - current = funcValue - isFunc = true - } - } - - if !isFunc { - // If current a pointer, resolve it - if current.Kind() == reflect.Ptr { - current = current.Elem() - if !current.IsValid() { - // Value is not valid (anymore) - return AsValue(nil), nil - } - } - - // Look up which part must be called now - switch part.typ { - case varTypeInt: - // Calling an index is only possible for: - // * slices/arrays/strings - switch current.Kind() { - case reflect.String, reflect.Array, reflect.Slice: - if part.i >= 0 && current.Len() > part.i { - current = current.Index(part.i) - } else { - // In Django, exceeding the length of a list is just empty. 
- return AsValue(nil), nil - } - default: - return nil, fmt.Errorf("Can't access an index on type %s (variable %s)", - current.Kind().String(), vr.String()) - } - case varTypeIdent: - // debugging: - // fmt.Printf("now = %s (kind: %s)\n", part.s, current.Kind().String()) - - // Calling a field or key - switch current.Kind() { - case reflect.Struct: - current = current.FieldByName(part.s) - case reflect.Map: - current = current.MapIndex(reflect.ValueOf(part.s)) - default: - return nil, fmt.Errorf("Can't access a field by name on type %s (variable %s)", - current.Kind().String(), vr.String()) - } - default: - panic("unimplemented") - } - } - } - - if !current.IsValid() { - // Value is not valid (anymore) - return AsValue(nil), nil - } - - // If current is a reflect.ValueOf(pongo2.Value), then unpack it - // Happens in function calls (as a return value) or by injecting - // into the execution context (e.g. in a for-loop) - if current.Type() == typeOfValuePtr { - tmpValue := current.Interface().(*Value) - current = tmpValue.val - isSafe = tmpValue.safe - } - - // Check whether this is an interface and resolve it where required - if current.Kind() == reflect.Interface { - current = reflect.ValueOf(current.Interface()) - } - - // Check if the part is a function call - if part.isFunctionCall || current.Kind() == reflect.Func { - // Check for callable - if current.Kind() != reflect.Func { - return nil, fmt.Errorf("'%s' is not a function (it is %s)", vr.String(), current.Kind().String()) - } - - // Check for correct function syntax and types - // func(*Value, ...) *Value - t := current.Type() - currArgs := part.callingArgs - - // If an implicit ExecCtx is needed - if t.NumIn() > 0 && t.In(0) == typeOfExecCtxPtr { - currArgs = append([]functionCallArgument{executionCtxEval{}}, currArgs...) 
- } - - // Input arguments - if len(currArgs) != t.NumIn() && !(len(currArgs) >= t.NumIn()-1 && t.IsVariadic()) { - return nil, - fmt.Errorf("Function input argument count (%d) of '%s' must be equal to the calling argument count (%d).", - t.NumIn(), vr.String(), len(currArgs)) - } - - // Output arguments - if t.NumOut() != 1 && t.NumOut() != 2 { - return nil, fmt.Errorf("'%s' must have exactly 1 or 2 output arguments, the second argument must be of type error", vr.String()) - } - - // Evaluate all parameters - var parameters []reflect.Value - - numArgs := t.NumIn() - isVariadic := t.IsVariadic() - var fnArg reflect.Type - - for idx, arg := range currArgs { - pv, err := arg.Evaluate(ctx) - if err != nil { - return nil, err - } - - if isVariadic { - if idx >= t.NumIn()-1 { - fnArg = t.In(numArgs - 1).Elem() - } else { - fnArg = t.In(idx) - } - } else { - fnArg = t.In(idx) - } - - if fnArg != typeOfValuePtr { - // Function's argument is not a *pongo2.Value, then we have to check whether input argument is of the same type as the function's argument - if !isVariadic { - if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface { - return nil, fmt.Errorf("Function input argument %d of '%s' must be of type %s or *pongo2.Value (not %T).", - idx, vr.String(), fnArg.String(), pv.Interface()) - } - // Function's argument has another type, using the interface-value - parameters = append(parameters, reflect.ValueOf(pv.Interface())) - } else { - if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface { - return nil, fmt.Errorf("Function variadic input argument of '%s' must be of type %s or *pongo2.Value (not %T).", - vr.String(), fnArg.String(), pv.Interface()) - } - // Function's argument has another type, using the interface-value - parameters = append(parameters, reflect.ValueOf(pv.Interface())) - } - } else { - // Function's argument is a *pongo2.Value - parameters = append(parameters, reflect.ValueOf(pv)) - } - } - - // Check if 
any of the values are invalid - for _, p := range parameters { - if p.Kind() == reflect.Invalid { - return nil, fmt.Errorf("Calling a function using an invalid parameter") - } - } - - // Call it and get first return parameter back - values := current.Call(parameters) - rv := values[0] - if t.NumOut() == 2 { - e := values[1].Interface() - if e != nil { - err, ok := e.(error) - if !ok { - return nil, fmt.Errorf("The second return value is not an error") - } - if err != nil { - return nil, err - } - } - } - - if rv.Type() != typeOfValuePtr { - current = reflect.ValueOf(rv.Interface()) - } else { - // Return the function call value - current = rv.Interface().(*Value).val - isSafe = rv.Interface().(*Value).safe - } - } - - if !current.IsValid() { - // Value is not valid (e. g. NIL value) - return AsValue(nil), nil - } - } - - return &Value{val: current, safe: isSafe}, nil -} - -func (vr *variableResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) { - value, err := vr.resolve(ctx) - if err != nil { - return AsValue(nil), ctx.Error(err.Error(), vr.locationToken) - } - return value, nil -} - -func (v *nodeFilteredVariable) FilterApplied(name string) bool { - for _, filter := range v.filterChain { - if filter.name == name { - return true - } - } - return false -} - -func (v *nodeFilteredVariable) Evaluate(ctx *ExecutionContext) (*Value, *Error) { - value, err := v.resolver.Evaluate(ctx) - if err != nil { - return nil, err - } - - for _, filter := range v.filterChain { - value, err = filter.Execute(value, ctx) - if err != nil { - return nil, err - } - } - - return value, nil -} - -// IDENT | IDENT.(IDENT|NUMBER)... 
-func (p *Parser) parseVariableOrLiteral() (IEvaluator, *Error) { - t := p.Current() - - if t == nil { - return nil, p.Error("Unexpected EOF, expected a number, string, keyword or identifier.", p.lastToken) - } - - // Is first part a number or a string, there's nothing to resolve (because there's only to return the value then) - switch t.Typ { - case TokenNumber: - p.Consume() - - // One exception to the rule that we don't have float64 literals is at the beginning - // of an expression (or a variable name). Since we know we started with an integer - // which can't obviously be a variable name, we can check whether the first number - // is followed by dot (and then a number again). If so we're converting it to a float64. - - if p.Match(TokenSymbol, ".") != nil { - // float64 - t2 := p.MatchType(TokenNumber) - if t2 == nil { - return nil, p.Error("Expected a number after the '.'.", nil) - } - f, err := strconv.ParseFloat(fmt.Sprintf("%s.%s", t.Val, t2.Val), 64) - if err != nil { - return nil, p.Error(err.Error(), t) - } - fr := &floatResolver{ - locationToken: t, - val: f, - } - return fr, nil - } - i, err := strconv.Atoi(t.Val) - if err != nil { - return nil, p.Error(err.Error(), t) - } - nr := &intResolver{ - locationToken: t, - val: i, - } - return nr, nil - - case TokenString: - p.Consume() - sr := &stringResolver{ - locationToken: t, - val: t.Val, - } - return sr, nil - case TokenKeyword: - p.Consume() - switch t.Val { - case "true": - br := &boolResolver{ - locationToken: t, - val: true, - } - return br, nil - case "false": - br := &boolResolver{ - locationToken: t, - val: false, - } - return br, nil - default: - return nil, p.Error("This keyword is not allowed here.", nil) - } - } - - resolver := &variableResolver{ - locationToken: t, - } - - // First part of a variable MUST be an identifier - if t.Typ != TokenIdentifier { - return nil, p.Error("Expected either a number, string, keyword or identifier.", t) - } - - resolver.parts = append(resolver.parts, 
&variablePart{ - typ: varTypeIdent, - s: t.Val, - }) - - p.Consume() // we consumed the first identifier of the variable name - -variableLoop: - for p.Remaining() > 0 { - t = p.Current() - - if p.Match(TokenSymbol, ".") != nil { - // Next variable part (can be either NUMBER or IDENT) - t2 := p.Current() - if t2 != nil { - switch t2.Typ { - case TokenIdentifier: - resolver.parts = append(resolver.parts, &variablePart{ - typ: varTypeIdent, - s: t2.Val, - }) - p.Consume() // consume: IDENT - continue variableLoop - case TokenNumber: - i, err := strconv.Atoi(t2.Val) - if err != nil { - return nil, p.Error(err.Error(), t2) - } - resolver.parts = append(resolver.parts, &variablePart{ - typ: varTypeInt, - i: i, - }) - p.Consume() // consume: NUMBER - continue variableLoop - default: - return nil, p.Error("This token is not allowed within a variable name.", t2) - } - } else { - // EOF - return nil, p.Error("Unexpected EOF, expected either IDENTIFIER or NUMBER after DOT.", - p.lastToken) - } - } else if p.Match(TokenSymbol, "(") != nil { - // Function call - // FunctionName '(' Comma-separated list of expressions ')' - part := resolver.parts[len(resolver.parts)-1] - part.isFunctionCall = true - argumentLoop: - for { - if p.Remaining() == 0 { - return nil, p.Error("Unexpected EOF, expected function call argument list.", p.lastToken) - } - - if p.Peek(TokenSymbol, ")") == nil { - // No closing bracket, so we're parsing an expression - exprArg, err := p.ParseExpression() - if err != nil { - return nil, err - } - part.callingArgs = append(part.callingArgs, exprArg) - - if p.Match(TokenSymbol, ")") != nil { - // If there's a closing bracket after an expression, we will stop parsing the arguments - break argumentLoop - } else { - // If there's NO closing bracket, there MUST be an comma - if p.Match(TokenSymbol, ",") == nil { - return nil, p.Error("Missing comma or closing bracket after argument.", nil) - } - } - } else { - // We got a closing bracket, so stop parsing arguments - 
p.Consume() - break argumentLoop - } - - } - // We're done parsing the function call, next variable part - continue variableLoop - } - - // No dot or function call? Then we're done with the variable parsing - break - } - - return resolver, nil -} - -func (p *Parser) parseVariableOrLiteralWithFilter() (*nodeFilteredVariable, *Error) { - v := &nodeFilteredVariable{ - locationToken: p.Current(), - } - - // Parse the variable name - resolver, err := p.parseVariableOrLiteral() - if err != nil { - return nil, err - } - v.resolver = resolver - - // Parse all the filters -filterLoop: - for p.Match(TokenSymbol, "|") != nil { - // Parse one single filter - filter, err := p.parseFilter() - if err != nil { - return nil, err - } - - // Check sandbox filter restriction - if _, isBanned := p.template.set.bannedFilters[filter.name]; isBanned { - return nil, p.Error(fmt.Sprintf("Usage of filter '%s' is not allowed (sandbox restriction active).", filter.name), nil) - } - - v.filterChain = append(v.filterChain, filter) - - continue filterLoop - } - - return v, nil -} - -func (p *Parser) parseVariableElement() (INode, *Error) { - node := &nodeVariable{ - locationToken: p.Current(), - } - - p.Consume() // consume '{{' - - expr, err := p.ParseExpression() - if err != nil { - return nil, err - } - node.expr = expr - - if p.Match(TokenSymbol, "}}") == nil { - return nil, p.Error("'}}' expected", nil) - } - - return node, nil -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/LICENSE b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/LICENSE deleted file mode 100644 index 67c4fb56..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/LICENSE +++ /dev/null @@ -1,187 +0,0 @@ -Copyright © 2014, Roger Peppe, Canonical Inc. - -This software is licensed under the LGPLv3, included below. 
- -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. 
-Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. 
You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. 
A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. 
- - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/bakery.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/bakery.go deleted file mode 100644 index 32e94721..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/bakery.go +++ /dev/null @@ -1,97 +0,0 @@ -package bakery - -import ( - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers" -) - -// Bakery is a convenience type that contains both an Oven -// and a Checker. -type Bakery struct { - Oven *Oven - Checker *Checker -} - -// BakeryParams holds a selection of parameters for the Oven -// and the Checker created by New. -// -// For more fine-grained control of parameters, create the -// Oven or Checker directly. -// -// The zero value is OK to use, but won't allow any authentication -// or third party caveats to be added. -type BakeryParams struct { - // Logger is used to send log messages. If it is nil, - // nothing will be logged. - Logger Logger - - // Checker holds the checker used to check first party caveats. 
- // If this is nil, New will use checkers.New(nil). - Checker FirstPartyCaveatChecker - - // RootKeyStore holds the root key store to use. If you need to - // use a different root key store for different operations, - // you'll need to pass a RootKeyStoreForOps value to NewOven - // directly. - // - // If this is nil, New will use NewMemRootKeyStore(). - // Note that that is almost certain insufficient for production services - // that are spread across multiple instances or that need - // to persist keys across restarts. - RootKeyStore RootKeyStore - - // Locator is used to find out information on third parties when - // adding third party caveats. If this is nil, no non-local third - // party caveats can be added. - Locator ThirdPartyLocator - - // Key holds the private key of the oven. If this is nil, - // no third party caveats may be added. - Key *KeyPair - - // OpsAuthorizer is used to check whether operations are authorized - // by some other already-authorized operation. If it is nil, - // NewChecker will assume no operation is authorized by any - // operation except itself. - OpsAuthorizer OpsAuthorizer - - // Location holds the location to use when creating new macaroons. - Location string - - // LegacyMacaroonOp holds the operation to associate with old - // macaroons that don't have associated operations. - // If this is empty, legacy macaroons will not be associated - // with any operations. - LegacyMacaroonOp Op -} - -// New returns a new Bakery instance which combines an Oven with a -// Checker for the convenience of callers that wish to use both -// together. 
-func New(p BakeryParams) *Bakery { - if p.Checker == nil { - p.Checker = checkers.New(nil) - } - ovenParams := OvenParams{ - Key: p.Key, - Namespace: p.Checker.Namespace(), - Location: p.Location, - Locator: p.Locator, - LegacyMacaroonOp: p.LegacyMacaroonOp, - } - if p.RootKeyStore != nil { - ovenParams.RootKeyStoreForOps = func(ops []Op) RootKeyStore { - return p.RootKeyStore - } - } - oven := NewOven(ovenParams) - - checker := NewChecker(CheckerParams{ - Checker: p.Checker, - MacaroonVerifier: oven, - OpsAuthorizer: p.OpsAuthorizer, - }) - return &Bakery{ - Oven: oven, - Checker: checker, - } -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checker.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checker.go deleted file mode 100644 index b864e2b1..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checker.go +++ /dev/null @@ -1,503 +0,0 @@ -package bakery - -import ( - "context" - "sort" - "sync" - "time" - - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v2" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers" -) - -// Op holds an entity and action to be authorized on that entity. -type Op struct { - // Entity holds the name of the entity to be authorized. - // Entity names should not contain spaces and should - // not start with the prefix "login" or "multi-" (conventionally, - // entity names will be prefixed with the entity type followed - // by a hyphen. - Entity string - - // Action holds the action to perform on the entity, such as "read" - // or "delete". It is up to the service using a checker to define - // a set of operations and keep them consistent over time. - Action string -} - -// NoOp holds the empty operation, signifying no authorized -// operation. This is always considered to be authorized. -// See OpsAuthorizer for one place that it's used. -var NoOp = Op{} - -// CheckerParams holds parameters for NewChecker. 
-type CheckerParams struct { - // Checker is used to check first party caveats when authorizing. - // If this is nil NewChecker will use checkers.New(nil). - Checker FirstPartyCaveatChecker - - // OpsAuthorizer is used to check whether operations are authorized - // by some other already-authorized operation. If it is nil, - // NewChecker will assume no operation is authorized by any - // operation except itself. - OpsAuthorizer OpsAuthorizer - - // MacaroonVerifier is used to verify macaroons. - MacaroonVerifier MacaroonVerifier - - // Logger is used to log checker operations. If it is nil, - // DefaultLogger("bakery") will be used. - Logger Logger -} - -// OpsAuthorizer is used to check whether an operation authorizes some other -// operation. For example, a macaroon with an operation allowing general access to a service -// might also grant access to a more specific operation. -type OpsAuthorizer interface { - // AuthorizeOp reports which elements of queryOps are authorized by - // authorizedOp. On return, each element of the slice should represent - // whether the respective element in queryOps has been authorized. - // An empty returned slice indicates that no operations are authorized. - // AuthorizeOps may also return third party caveats that apply to - // the authorized operations. Access will only be authorized when - // those caveats are discharged by the client. - // - // When not all operations can be authorized with the macaroons - // supplied to Checker.Auth, the checker will call AuthorizeOps - // with NoOp, because some operations might be authorized - // regardless of authority. NoOp will always be the last - // operation queried within any given Allow call. - // - // AuthorizeOps should only return an error if authorization cannot be checked - // (for example because of a database access failure), not because - // authorization was denied. 
- AuthorizeOps(ctx context.Context, authorizedOp Op, queryOps []Op) ([]bool, []checkers.Caveat, error) -} - -// AuthInfo information about an authorization decision. -type AuthInfo struct { - // Macaroons holds all the macaroons that were - // passed to Auth. - Macaroons []macaroon.Slice - - // Used records which macaroons were used in the - // authorization decision. It holds one element for - // each element of Macaroons. Macaroons that - // were invalid or unnecessary will have a false entry. - Used []bool - - // OpIndexes holds the index of each macaroon - // that was used to authorize an operation. - OpIndexes map[Op]int -} - -// Conditions returns the first party caveat caveat conditions hat apply to -// the given AuthInfo. This can be used to apply appropriate caveats -// to capability macaroons granted via a Checker.Allow call. -func (a *AuthInfo) Conditions() []string { - var squasher caveatSquasher - for i, ms := range a.Macaroons { - if !a.Used[i] { - continue - } - for _, m := range ms { - for _, cav := range m.Caveats() { - if len(cav.VerificationId) > 0 { - continue - } - squasher.add(string(cav.Id)) - } - } - } - return squasher.final() -} - -// Checker wraps a FirstPartyCaveatChecker and adds authentication and authorization checks. -// -// It uses macaroons as authorization tokens but it is not itself responsible for -// creating the macaroons - see the Oven type (TODO) for one way of doing that. -type Checker struct { - FirstPartyCaveatChecker - p CheckerParams -} - -// NewChecker returns a new Checker using the given parameters. -func NewChecker(p CheckerParams) *Checker { - if p.Checker == nil { - p.Checker = checkers.New(nil) - } - if p.Logger == nil { - p.Logger = DefaultLogger("bakery") - } - return &Checker{ - FirstPartyCaveatChecker: p.Checker, - p: p, - } -} - -// Auth makes a new AuthChecker instance using the -// given macaroons to inform authorization decisions. 
-func (c *Checker) Auth(mss ...macaroon.Slice) *AuthChecker { - return &AuthChecker{ - Checker: c, - macaroons: mss, - } -} - -// AuthChecker authorizes operations with respect to a user's request. -type AuthChecker struct { - // Checker is used to check first party caveats. - *Checker - macaroons []macaroon.Slice - // conditions holds the first party caveat conditions - // that apply to each of the above macaroons. - conditions [][]string - initOnce sync.Once - initError error - initErrors []error - // authIndexes holds for each potentially authorized operation - // the indexes of the macaroons that authorize it. - authIndexes map[Op][]int -} - -func (a *AuthChecker) init(ctx context.Context) error { - a.initOnce.Do(func() { - a.initError = a.initOnceFunc(ctx) - }) - return a.initError -} - -func (a *AuthChecker) initOnceFunc(ctx context.Context) error { - a.authIndexes = make(map[Op][]int) - a.conditions = make([][]string, len(a.macaroons)) - for i, ms := range a.macaroons { - ops, conditions, err := a.p.MacaroonVerifier.VerifyMacaroon(ctx, ms) - if err != nil { - if !isVerificationError(err) { - return errgo.Notef(err, "cannot retrieve macaroon") - } - a.initErrors = append(a.initErrors, errgo.Mask(err)) - continue - } - a.p.Logger.Debugf(ctx, "macaroon %d has valid sig; ops %q, conditions %q", i, ops, conditions) - // It's a valid macaroon (in principle - we haven't checked first party caveats). - a.conditions[i] = conditions - for _, op := range ops { - a.authIndexes[op] = append(a.authIndexes[op], i) - } - } - return nil -} - -// Allowed returns an AuthInfo that provides information on all -// operations directly authorized by the macaroons provided -// to Checker.Auth. Note that this does not include operations that would be indirectly -// allowed via the OpAuthorizer. -// -// Allowed returns an error only when there is an underlying storage failure, -// not when operations are not authorized. 
-func (a *AuthChecker) Allowed(ctx context.Context) (*AuthInfo, error) { - actx, err := a.newAllowContext(ctx, nil) - if err != nil { - return nil, errgo.Mask(err) - } - for op, mindexes := range a.authIndexes { - for _, mindex := range mindexes { - if actx.status[mindex]&statusOK != 0 { - actx.status[mindex] |= statusUsed - actx.opIndexes[op] = mindex - break - } - } - } - return actx.newAuthInfo(), nil -} - -func (a *allowContext) newAuthInfo() *AuthInfo { - info := &AuthInfo{ - Macaroons: a.checker.macaroons, - Used: make([]bool, len(a.checker.macaroons)), - OpIndexes: a.opIndexes, - } - for i, status := range a.status { - if status&statusUsed != 0 { - info.Used[i] = true - } - } - return info -} - -// allowContext holds temporary state used by AuthChecker.allowAny. -type allowContext struct { - checker *AuthChecker - - // status holds used and authorized status of all the - // request macaroons. - status []macaroonStatus - - // opIndex holds an entry for each authorized operation - // that refers to the macaroon that authorized that operation. - opIndexes map[Op]int - - // authed holds which of the requested operations have - // been authorized so far. - authed []bool - - // need holds all of the requested operations that - // are remaining to be authorized. needIndex holds the - // index of each of these operations in the original operations slice - need []Op - needIndex []int - - // errors holds any errors encountered during authorization. 
- errors []error -} - -type macaroonStatus uint8 - -const ( - statusOK = 1 << iota - statusUsed -) - -func (a *AuthChecker) newAllowContext(ctx context.Context, ops []Op) (*allowContext, error) { - actx := &allowContext{ - checker: a, - status: make([]macaroonStatus, len(a.macaroons)), - authed: make([]bool, len(ops)), - need: append([]Op(nil), ops...), - needIndex: make([]int, len(ops)), - opIndexes: make(map[Op]int), - } - for i := range actx.needIndex { - actx.needIndex[i] = i - } - if err := a.init(ctx); err != nil { - return actx, errgo.Mask(err) - } - // Check all the macaroons with respect to the current context. - // Technically this is more than we need to do, because some - // of the macaroons might not authorize the specific operations - // we're interested in, but that's an optimisation that could happen - // later if performance becomes an issue with respect to that. -outer: - for i, ms := range a.macaroons { - ctx := checkers.ContextWithMacaroons(ctx, a.Namespace(), ms) - for _, cond := range a.conditions[i] { - if err := a.CheckFirstPartyCaveat(ctx, cond); err != nil { - actx.addError(err) - continue outer - } - } - actx.status[i] = statusOK - } - return actx, nil -} - -// Macaroons returns the macaroons that were passed -// to Checker.Auth when creating the AuthChecker. -func (a *AuthChecker) Macaroons() []macaroon.Slice { - return a.macaroons -} - -// Allow checks that the authorizer's request is authorized to -// perform all the given operations. -// -// If all the operations are allowed, an AuthInfo is returned holding -// details of the decision. -// -// If an operation was not allowed, an error will be returned which may -// be *DischargeRequiredError holding the operations that remain to -// be authorized in order to allow authorization to -// proceed. 
-func (a *AuthChecker) Allow(ctx context.Context, ops ...Op) (*AuthInfo, error) { - actx, err := a.newAllowContext(ctx, ops) - if err != nil { - return nil, errgo.Mask(err) - } - actx.checkDirect(ctx) - if len(actx.need) == 0 { - return actx.newAuthInfo(), nil - } - caveats, err := actx.checkIndirect(ctx) - if err != nil { - return nil, errgo.Mask(err) - } - if len(actx.need) == 0 && len(caveats) == 0 { - // No more ops need to be authenticated and no caveats to be discharged. - return actx.newAuthInfo(), nil - } - a.p.Logger.Debugf(ctx, "operations still needed after auth check: %#v", actx.need) - if len(caveats) == 0 || len(actx.need) > 0 { - allErrors := make([]error, 0, len(a.initErrors)+len(actx.errors)) - allErrors = append(allErrors, a.initErrors...) - allErrors = append(allErrors, actx.errors...) - var err error - if len(allErrors) > 0 { - // TODO return all errors? - a.p.Logger.Infof(ctx, "all auth errors: %q", allErrors) - err = allErrors[0] - } - return nil, errgo.WithCausef(err, ErrPermissionDenied, "") - } - return nil, &DischargeRequiredError{ - Message: "some operations have extra caveats", - Ops: ops, - Caveats: caveats, - } -} - -// checkDirect checks which operations are directly authorized by -// the macaroon operations. -func (a *allowContext) checkDirect(ctx context.Context) { - defer a.updateNeed() - for i, op := range a.need { - if op == NoOp { - // NoOp is always authorized. - a.authed[a.needIndex[i]] = true - continue - } - for _, mindex := range a.checker.authIndexes[op] { - if a.status[mindex]&statusOK != 0 { - a.authed[a.needIndex[i]] = true - a.status[mindex] |= statusUsed - a.opIndexes[op] = mindex - break - } - } - } -} - -// checkIndirect checks to see if any of the remaining operations are authorized -// indirectly with the already-authorized operations. 
-func (a *allowContext) checkIndirect(ctx context.Context) ([]checkers.Caveat, error) { - if a.checker.p.OpsAuthorizer == nil { - return nil, nil - } - var allCaveats []checkers.Caveat - for op, mindexes := range a.checker.authIndexes { - if len(a.need) == 0 { - break - } - for _, mindex := range mindexes { - if a.status[mindex]&statusOK == 0 { - continue - } - ctx := checkers.ContextWithMacaroons(ctx, a.checker.Namespace(), a.checker.macaroons[mindex]) - authedOK, caveats, err := a.checker.p.OpsAuthorizer.AuthorizeOps(ctx, op, a.need) - if err != nil { - return nil, errgo.Mask(err) - } - // TODO we could perhaps combine identical third party caveats here. - allCaveats = append(allCaveats, caveats...) - for i, ok := range authedOK { - if !ok { - continue - } - // Operation is authorized. Mark the appropriate macaroon as used, - // and remove the operation from the needed list so that we don't - // bother AuthorizeOps with it again. - a.status[mindex] |= statusUsed - a.authed[a.needIndex[i]] = true - a.opIndexes[a.need[i]] = mindex - } - } - a.updateNeed() - } - if len(a.need) == 0 { - return allCaveats, nil - } - // We've still got at least one operation unauthorized. - // Try to see if it can be authorized with no operation at all. - authedOK, caveats, err := a.checker.p.OpsAuthorizer.AuthorizeOps(ctx, NoOp, a.need) - if err != nil { - return nil, errgo.Mask(err) - } - allCaveats = append(allCaveats, caveats...) - for i, ok := range authedOK { - if ok { - a.authed[a.needIndex[i]] = true - } - } - a.updateNeed() - return allCaveats, nil -} - -// updateNeed removes all authorized operations from a.need -// and updates a.needIndex appropriately too. 
-func (a *allowContext) updateNeed() { - j := 0 - for i, opIndex := range a.needIndex { - if a.authed[opIndex] { - continue - } - if i != j { - a.need[j], a.needIndex[j] = a.need[i], a.needIndex[i] - } - j++ - } - a.need, a.needIndex = a.need[0:j], a.needIndex[0:j] -} - -func (a *allowContext) addError(err error) { - a.errors = append(a.errors, err) -} - -// caveatSquasher rationalizes first party caveats created for a capability -// by: -// - including only the earliest time-before caveat. -// - removing duplicates. -type caveatSquasher struct { - expiry time.Time - conds []string -} - -func (c *caveatSquasher) add(cond string) { - if c.add0(cond) { - c.conds = append(c.conds, cond) - } -} - -func (c *caveatSquasher) add0(cond string) bool { - cond, args, err := checkers.ParseCaveat(cond) - if err != nil { - // Be safe - if we can't parse the caveat, just leave it there. - return true - } - if cond != checkers.CondTimeBefore { - return true - } - et, err := time.Parse(time.RFC3339Nano, args) - if err != nil || et.IsZero() { - // Again, if it doesn't seem valid, leave it alone. - return true - } - if c.expiry.IsZero() || et.Before(c.expiry) { - c.expiry = et - } - return false -} - -func (c *caveatSquasher) final() []string { - if !c.expiry.IsZero() { - c.conds = append(c.conds, checkers.TimeBeforeCaveat(c.expiry).Condition) - } - if len(c.conds) == 0 { - return nil - } - // Make deterministic and eliminate duplicates. 
- sort.Strings(c.conds) - prev := c.conds[0] - j := 1 - for _, cond := range c.conds[1:] { - if cond != prev { - c.conds[j] = cond - prev = cond - j++ - } - } - c.conds = c.conds[:j] - return c.conds -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/checkers.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/checkers.go deleted file mode 100644 index 153b31d2..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/checkers.go +++ /dev/null @@ -1,246 +0,0 @@ -// The checkers package provides some standard first-party -// caveat checkers and some primitives for combining them. -package checkers - -import ( - "context" - "fmt" - "sort" - "strings" - - "gopkg.in/errgo.v1" -) - -// StdNamespace holds the URI of the standard checkers schema. -const StdNamespace = "std" - -// Constants for all the standard caveat conditions. -// First and third party caveat conditions are both defined here, -// even though notionally they exist in separate name spaces. -const ( - CondDeclared = "declared" - CondTimeBefore = "time-before" - CondError = "error" -) - -const ( - CondNeedDeclared = "need-declared" -) - -// Func is the type of a function used by Checker to check a caveat. The -// cond parameter will hold the caveat condition including any namespace -// prefix; the arg parameter will hold any additional caveat argument -// text. -type Func func(ctx context.Context, cond, arg string) error - -// CheckerInfo holds information on a registered checker. -type CheckerInfo struct { - // Check holds the actual checker function. - Check Func - // Prefix holds the prefix for the checker condition. - Prefix string - // Name holds the name of the checker condition. - Name string - // Namespace holds the namespace URI for the checker's - // schema. 
- Namespace string -} - -var allCheckers = map[string]Func{ - CondTimeBefore: checkTimeBefore, - CondDeclared: checkDeclared, - CondError: checkError, -} - -// NewEmpty returns a checker using the given namespace -// that has no registered checkers. -// If ns is nil, a new one will be created. -func NewEmpty(ns *Namespace) *Checker { - if ns == nil { - ns = NewNamespace(nil) - } - return &Checker{ - namespace: ns, - checkers: make(map[string]CheckerInfo), - } -} - -// RegisterStd registers all the standard checkers in the given checker. -// If not present already, the standard checkers schema (StdNamespace) is -// added to the checker's namespace with an empty prefix. -func RegisterStd(c *Checker) { - c.namespace.Register(StdNamespace, "") - for cond, check := range allCheckers { - c.Register(cond, StdNamespace, check) - } -} - -// New returns a checker with all the standard caveats checkers registered. -// If ns is nil, a new one will be created. -// The standard namespace is also added to ns if not present. -func New(ns *Namespace) *Checker { - c := NewEmpty(ns) - RegisterStd(c) - return c -} - -// Checker holds a set of checkers for first party caveats. -// It implements bakery.CheckFirstParty caveat. -type Checker struct { - namespace *Namespace - checkers map[string]CheckerInfo -} - -// Register registers the given condition in the given namespace URI -// to be checked with the given check function. -// It will panic if the namespace is not registered or -// if the condition has already been registered. 
-func (c *Checker) Register(cond, uri string, check Func) { - if check == nil { - panic(fmt.Errorf("nil check function registered for namespace %q when registering condition %q", uri, cond)) - } - prefix, ok := c.namespace.Resolve(uri) - if !ok { - panic(fmt.Errorf("no prefix registered for namespace %q when registering condition %q", uri, cond)) - } - if prefix == "" && strings.Contains(cond, ":") { - panic(fmt.Errorf("caveat condition %q in namespace %q contains a colon but its prefix is empty", cond, uri)) - } - fullCond := ConditionWithPrefix(prefix, cond) - if info, ok := c.checkers[fullCond]; ok { - panic(fmt.Errorf("checker for %q (namespace %q) already registered in namespace %q", fullCond, uri, info.Namespace)) - } - c.checkers[fullCond] = CheckerInfo{ - Check: check, - Namespace: uri, - Name: cond, - Prefix: prefix, - } -} - -// Info returns information on all the registered checkers, sorted by namespace -// and then name. -func (c *Checker) Info() []CheckerInfo { - checkers := make([]CheckerInfo, 0, len(c.checkers)) - for _, c := range c.checkers { - checkers = append(checkers, c) - } - sort.Sort(checkerInfoByName(checkers)) - return checkers -} - -// Namespace returns the namespace associated with the -// checker. It implements bakery.FirstPartyCaveatChecker.Namespace. -func (c *Checker) Namespace() *Namespace { - return c.namespace -} - -// CheckFirstPartyCaveat implements bakery.FirstPartyCaveatChecker -// by checking the caveat against all registered caveats conditions. -func (c *Checker) CheckFirstPartyCaveat(ctx context.Context, cav string) error { - cond, arg, err := ParseCaveat(cav) - if err != nil { - // If we can't parse it, perhaps it's in some other format, - // return a not-recognised error. 
- return errgo.WithCausef(err, ErrCaveatNotRecognized, "cannot parse caveat %q", cav) - } - cf, ok := c.checkers[cond] - if !ok { - return errgo.NoteMask(ErrCaveatNotRecognized, fmt.Sprintf("caveat %q not satisfied", cav), errgo.Any) - } - if err := cf.Check(ctx, cond, arg); err != nil { - return errgo.NoteMask(err, fmt.Sprintf("caveat %q not satisfied", cav), errgo.Any) - } - return nil -} - -var errBadCaveat = errgo.New("bad caveat") - -func checkError(ctx context.Context, _, arg string) error { - return errBadCaveat -} - -// ErrCaveatNotRecognized is the cause of errors returned -// from caveat checkers when the caveat was not -// recognized. -var ErrCaveatNotRecognized = errgo.New("caveat not recognized") - -// Caveat represents a condition that must be true for a check to -// complete successfully. If Location is non-empty, the caveat must be -// discharged by a third party at the given location. -// The Namespace field holds the namespace URI of the -// condition - if it is non-empty, it will be converted to -// a namespace prefix before adding to the macaroon. -type Caveat struct { - Condition string - Namespace string - Location string -} - -// Condition builds a caveat condition from the given name and argument. -func Condition(name, arg string) string { - if arg == "" { - return name - } - return name + " " + arg -} - -func firstParty(name, arg string) Caveat { - return Caveat{ - Condition: Condition(name, arg), - Namespace: StdNamespace, - } -} - -// ParseCaveat parses a caveat into an identifier, identifying the -// checker that should be used, and the argument to the checker (the -// rest of the string). -// -// The identifier is taken from all the characters before the first -// space character. 
-func ParseCaveat(cav string) (cond, arg string, err error) { - if cav == "" { - return "", "", fmt.Errorf("empty caveat") - } - i := strings.IndexByte(cav, ' ') - if i < 0 { - return cav, "", nil - } - if i == 0 { - return "", "", fmt.Errorf("caveat starts with space character") - } - return cav[0:i], cav[i+1:], nil -} - -// ErrorCaveatf returns a caveat that will never be satisfied, holding -// the given fmt.Sprintf formatted text as the text of the caveat. -// -// This should only be used for highly unusual conditions that are never -// expected to happen in practice, such as a malformed key that is -// conventionally passed as a constant. It's not a panic but you should -// only use it in cases where a panic might possibly be appropriate. -// -// This mechanism means that caveats can be created without error -// checking and a later systematic check at a higher level (in the -// bakery package) can produce an error instead. -func ErrorCaveatf(f string, a ...interface{}) Caveat { - return firstParty(CondError, fmt.Sprintf(f, a...)) -} - -type checkerInfoByName []CheckerInfo - -func (c checkerInfoByName) Less(i, j int) bool { - info0, info1 := &c[i], &c[j] - if info0.Namespace != info1.Namespace { - return info0.Namespace < info1.Namespace - } - return info0.Name < info1.Name -} - -func (c checkerInfoByName) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} - -func (c checkerInfoByName) Len() int { - return len(c) -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/declared.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/declared.go deleted file mode 100644 index f41d6c98..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/declared.go +++ /dev/null @@ -1,137 +0,0 @@ -package checkers - -import ( - "context" - "strings" - - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v2" -) - -type macaroonsKey struct{} - -type macaroonsValue struct { - ns *Namespace - ms macaroon.Slice 
-} - -// ContextWithMacaroons returns the given context associated with a -// macaroon slice and the name space to use to interpret caveats in -// the macaroons. -func ContextWithMacaroons(ctx context.Context, ns *Namespace, ms macaroon.Slice) context.Context { - return context.WithValue(ctx, macaroonsKey{}, macaroonsValue{ - ns: ns, - ms: ms, - }) -} - -// MacaroonsFromContext returns the namespace and macaroons associated -// with the context by ContextWithMacaroons. This can be used to -// implement "structural" first-party caveats that are predicated on -// the macaroons being validated. -func MacaroonsFromContext(ctx context.Context) (*Namespace, macaroon.Slice) { - v, _ := ctx.Value(macaroonsKey{}).(macaroonsValue) - return v.ns, v.ms -} - -// DeclaredCaveat returns a "declared" caveat asserting that the given key is -// set to the given value. If a macaroon has exactly one first party -// caveat asserting the value of a particular key, then InferDeclared -// will be able to infer the value, and then DeclaredChecker will allow -// the declared value if it has the value specified here. -// -// If the key is empty or contains a space, DeclaredCaveat -// will return an error caveat. -func DeclaredCaveat(key string, value string) Caveat { - if strings.Contains(key, " ") || key == "" { - return ErrorCaveatf("invalid caveat 'declared' key %q", key) - } - return firstParty(CondDeclared, key+" "+value) -} - -// NeedDeclaredCaveat returns a third party caveat that -// wraps the provided third party caveat and requires -// that the third party must add "declared" caveats for -// all the named keys. -// TODO(rog) namespaces in third party caveats? 
-func NeedDeclaredCaveat(cav Caveat, keys ...string) Caveat { - if cav.Location == "" { - return ErrorCaveatf("need-declared caveat is not third-party") - } - return Caveat{ - Location: cav.Location, - Condition: CondNeedDeclared + " " + strings.Join(keys, ",") + " " + cav.Condition, - } -} - -func checkDeclared(ctx context.Context, _, arg string) error { - parts := strings.SplitN(arg, " ", 2) - if len(parts) != 2 { - return errgo.Newf("declared caveat has no value") - } - ns, ms := MacaroonsFromContext(ctx) - attrs := InferDeclared(ns, ms) - val, ok := attrs[parts[0]] - if !ok { - return errgo.Newf("got %s=null, expected %q", parts[0], parts[1]) - } - if val != parts[1] { - return errgo.Newf("got %s=%q, expected %q", parts[0], val, parts[1]) - } - return nil -} - -// InferDeclared retrieves any declared information from -// the given macaroons and returns it as a key-value map. -// -// Information is declared with a first party caveat as created -// by DeclaredCaveat. -// -// If there are two caveats that declare the same key with -// different values, the information is omitted from the map. -// When the caveats are later checked, this will cause the -// check to fail. -func InferDeclared(ns *Namespace, ms macaroon.Slice) map[string]string { - var conditions []string - for _, m := range ms { - for _, cav := range m.Caveats() { - if cav.Location == "" { - conditions = append(conditions, string(cav.Id)) - } - } - } - return InferDeclaredFromConditions(ns, conditions) -} - -// InferDeclaredFromConditions is like InferDeclared except that -// it is passed a set of first party caveat conditions rather than a set of macaroons. -func InferDeclaredFromConditions(ns *Namespace, conds []string) map[string]string { - var conflicts []string - // If we can't resolve that standard namespace, then we'll look for - // just bare "declared" caveats which will work OK for legacy - // macaroons with no namespace. 
- prefix, _ := ns.Resolve(StdNamespace) - declaredCond := prefix + CondDeclared - - info := make(map[string]string) - for _, cond := range conds { - name, rest, _ := ParseCaveat(cond) - if name != declaredCond { - continue - } - parts := strings.SplitN(rest, " ", 2) - if len(parts) != 2 { - continue - } - key, val := parts[0], parts[1] - if oldVal, ok := info[key]; ok && oldVal != val { - conflicts = append(conflicts, key) - continue - } - info[key] = val - } - for _, key := range conflicts { - delete(info, key) - } - return info -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/namespace.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/namespace.go deleted file mode 100644 index 8fbc8f87..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/namespace.go +++ /dev/null @@ -1,214 +0,0 @@ -package checkers - -import ( - "sort" - "strings" - "unicode" - "unicode/utf8" - - "gopkg.in/errgo.v1" -) - -// Namespace holds maps from schema URIs to the -// prefixes that are used to encode them in first party -// caveats. Several different URIs may map to the same -// prefix - this is usual when several different backwardly -// compatible schema versions are registered. -type Namespace struct { - uriToPrefix map[string]string -} - -// Equal reports whether ns2 encodes the same namespace -// as the receiver. -func (ns1 *Namespace) Equal(ns2 *Namespace) bool { - if ns1 == ns2 || ns1 == nil || ns2 == nil { - return ns1 == ns2 - } - if len(ns1.uriToPrefix) != len(ns2.uriToPrefix) { - return false - } - for k, v := range ns1.uriToPrefix { - if ns2.uriToPrefix[k] != v { - return false - } - } - return true -} - -// NewNamespace returns a new namespace with the -// given initial contents. It will panic if any of the -// URI keys or their associated prefix are invalid -// (see IsValidSchemaURI and IsValidPrefix). 
-func NewNamespace(uriToPrefix map[string]string) *Namespace { - ns := &Namespace{ - uriToPrefix: make(map[string]string), - } - for uri, prefix := range uriToPrefix { - ns.Register(uri, prefix) - } - return ns -} - -// String returns the namespace representation as returned by -// ns.MarshalText. -func (ns *Namespace) String() string { - data, _ := ns.MarshalText() - return string(data) -} - -// MarshalText implements encoding.TextMarshaler by -// returning all the elements in the namespace sorted by -// URI, joined to the associated prefix with a colon and -// separated with spaces. -func (ns *Namespace) MarshalText() ([]byte, error) { - if ns == nil || len(ns.uriToPrefix) == 0 { - return nil, nil - } - uris := make([]string, 0, len(ns.uriToPrefix)) - dataLen := 0 - for uri, prefix := range ns.uriToPrefix { - uris = append(uris, uri) - dataLen += len(uri) + 1 + len(prefix) + 1 - } - sort.Strings(uris) - data := make([]byte, 0, dataLen) - for i, uri := range uris { - if i > 0 { - data = append(data, ' ') - } - data = append(data, uri...) - data = append(data, ':') - data = append(data, ns.uriToPrefix[uri]...) 
- } - return data, nil -} - -func (ns *Namespace) UnmarshalText(data []byte) error { - uriToPrefix := make(map[string]string) - elems := strings.Fields(string(data)) - for _, elem := range elems { - i := strings.LastIndex(elem, ":") - if i == -1 { - return errgo.Newf("no colon in namespace field %q", elem) - } - uri, prefix := elem[0:i], elem[i+1:] - if !IsValidSchemaURI(uri) { - // Currently this can't happen because the only invalid URIs - // are those which contain a space - return errgo.Newf("invalid URI %q in namespace field %q", uri, elem) - } - if !IsValidPrefix(prefix) { - return errgo.Newf("invalid prefix %q in namespace field %q", prefix, elem) - } - if _, ok := uriToPrefix[uri]; ok { - return errgo.Newf("duplicate URI %q in namespace %q", uri, data) - } - uriToPrefix[uri] = prefix - } - ns.uriToPrefix = uriToPrefix - return nil -} - -// EnsureResolved tries to resolve the given schema URI to a prefix and -// returns the prefix and whether the resolution was successful. If the -// URI hasn't been registered but a compatible version has, the -// given URI is registered with the same prefix. -func (ns *Namespace) EnsureResolved(uri string) (string, bool) { - // TODO(rog) compatibility - return ns.Resolve(uri) -} - -// Resolve resolves the given schema URI to its registered prefix and -// returns the prefix and whether the resolution was successful. -// -// If ns is nil, it is treated as if it were empty. -// -// Resolve does not mutate ns and may be called concurrently -// with other non-mutating Namespace methods. -func (ns *Namespace) Resolve(uri string) (string, bool) { - if ns == nil { - return "", false - } - prefix, ok := ns.uriToPrefix[uri] - return prefix, ok -} - -// ResolveCaveat resolves the given caveat by using -// Resolve to map from its schema namespace to the appropriate prefix using -// Resolve. If there is no registered prefix for the namespace, -// it returns an error caveat. 
-// -// If ns.Namespace is empty or ns.Location is non-empty, it returns cav unchanged. -// -// If ns is nil, it is treated as if it were empty. -// -// ResolveCaveat does not mutate ns and may be called concurrently -// with other non-mutating Namespace methods. -func (ns *Namespace) ResolveCaveat(cav Caveat) Caveat { - // TODO(rog) If a namespace isn't registered, try to resolve it by - // resolving it to the latest compatible version that is - // registered. - if cav.Namespace == "" || cav.Location != "" { - return cav - } - prefix, ok := ns.Resolve(cav.Namespace) - if !ok { - errCav := ErrorCaveatf("caveat %q in unregistered namespace %q", cav.Condition, cav.Namespace) - if errCav.Namespace != cav.Namespace { - prefix, _ = ns.Resolve(errCav.Namespace) - } - cav = errCav - } - if prefix != "" { - cav.Condition = ConditionWithPrefix(prefix, cav.Condition) - } - cav.Namespace = "" - return cav -} - -// ConditionWithPrefix returns the given string prefixed by the -// given prefix. If the prefix is non-empty, a colon -// is used to separate them. -func ConditionWithPrefix(prefix, condition string) string { - if prefix == "" { - return condition - } - return prefix + ":" + condition -} - -// Register registers the given URI and associates it -// with the given prefix. If the URI has already been registered, -// this is a no-op. -func (ns *Namespace) Register(uri, prefix string) { - if !IsValidSchemaURI(uri) { - panic(errgo.Newf("cannot register invalid URI %q (prefix %q)", uri, prefix)) - } - if !IsValidPrefix(prefix) { - panic(errgo.Newf("cannot register invalid prefix %q for URI %q", prefix, uri)) - } - if _, ok := ns.uriToPrefix[uri]; !ok { - ns.uriToPrefix[uri] = prefix - } -} - -func invalidSchemaRune(r rune) bool { - return unicode.IsSpace(r) -} - -// IsValidSchemaURI reports whether the given argument is suitable for -// use as a namespace schema URI. It must be non-empty, a valid UTF-8 -// string and it must not contain white space. 
-func IsValidSchemaURI(uri string) bool { - // TODO more stringent requirements? - return len(uri) > 0 && - utf8.ValidString(uri) && - strings.IndexFunc(uri, invalidSchemaRune) == -1 -} - -func invalidPrefixRune(r rune) bool { - return r == ' ' || r == ':' || unicode.IsSpace(r) -} - -func IsValidPrefix(prefix string) bool { - return utf8.ValidString(prefix) && strings.IndexFunc(prefix, invalidPrefixRune) == -1 -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/time.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/time.go deleted file mode 100644 index bd71cbbc..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/time.go +++ /dev/null @@ -1,97 +0,0 @@ -package checkers - -import ( - "context" - "fmt" - "time" - - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v2" -) - -// Clock represents a clock that can be faked for testing purposes. -type Clock interface { - Now() time.Time -} - -type timeKey struct{} - -func ContextWithClock(ctx context.Context, clock Clock) context.Context { - if clock == nil { - return ctx - } - return context.WithValue(ctx, timeKey{}, clock) -} - -func clockFromContext(ctx context.Context) Clock { - c, _ := ctx.Value(timeKey{}).(Clock) - return c -} - -func checkTimeBefore(ctx context.Context, _, arg string) error { - var now time.Time - if clock := clockFromContext(ctx); clock != nil { - now = clock.Now() - } else { - now = time.Now() - } - t, err := time.Parse(time.RFC3339Nano, arg) - if err != nil { - return errgo.Mask(err) - } - if !now.Before(t) { - return fmt.Errorf("macaroon has expired") - } - return nil -} - -// TimeBeforeCaveat returns a caveat that specifies that -// the time that it is checked should be before t. 
-func TimeBeforeCaveat(t time.Time) Caveat { - return firstParty(CondTimeBefore, t.UTC().Format(time.RFC3339Nano)) -} - -// ExpiryTime returns the minimum time of any time-before caveats found -// in the given slice and whether there were any such caveats found. -// -// The ns parameter is used to determine the standard namespace prefix - if -// the standard namespace is not found, the empty prefix is assumed. -func ExpiryTime(ns *Namespace, cavs []macaroon.Caveat) (time.Time, bool) { - prefix, _ := ns.Resolve(StdNamespace) - timeBeforeCond := ConditionWithPrefix(prefix, CondTimeBefore) - var t time.Time - var expires bool - for _, cav := range cavs { - cav := string(cav.Id) - name, rest, _ := ParseCaveat(cav) - if name != timeBeforeCond { - continue - } - et, err := time.Parse(time.RFC3339Nano, rest) - if err != nil { - continue - } - if !expires || et.Before(t) { - t = et - expires = true - } - } - return t, expires -} - -// MacaroonsExpiryTime returns the minimum time of any time-before -// caveats found in the given macaroons and whether there were -// any such caveats found. 
-func MacaroonsExpiryTime(ns *Namespace, ms macaroon.Slice) (time.Time, bool) { - var t time.Time - var expires bool - for _, m := range ms { - if et, ex := ExpiryTime(ns, m.Caveats()); ex { - if !expires || et.Before(t) { - t = et - expires = true - } - } - } - return t, expires -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/codec.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/codec.go deleted file mode 100644 index fb76ba55..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/codec.go +++ /dev/null @@ -1,381 +0,0 @@ -package bakery - -import ( - "bytes" - "crypto/rand" - "encoding/base64" - "encoding/binary" - "encoding/json" - - "golang.org/x/crypto/nacl/box" - "gopkg.in/errgo.v1" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers" -) - -type caveatRecord struct { - RootKey []byte - Condition string -} - -// caveatJSON defines the format of a V1 JSON-encoded third party caveat id. -type caveatJSON struct { - ThirdPartyPublicKey *PublicKey - FirstPartyPublicKey *PublicKey - Nonce []byte - Id string -} - -// encodeCaveat encrypts a third-party caveat with the given condtion -// and root key. The thirdPartyInfo key holds information about the -// third party we're encrypting the caveat for; the key is the -// public/private key pair of the party that's adding the caveat. -// -// The caveat will be encoded according to the version information -// found in thirdPartyInfo. -func encodeCaveat( - condition string, - rootKey []byte, - thirdPartyInfo ThirdPartyInfo, - key *KeyPair, - ns *checkers.Namespace, -) ([]byte, error) { - switch thirdPartyInfo.Version { - case Version0, Version1: - return encodeCaveatV1(condition, rootKey, &thirdPartyInfo.PublicKey, key) - case Version2: - return encodeCaveatV2(condition, rootKey, &thirdPartyInfo.PublicKey, key) - default: - // Version 3 or later - use V3. 
- return encodeCaveatV3(condition, rootKey, &thirdPartyInfo.PublicKey, key, ns) - } -} - -// encodeCaveatV1 creates a JSON-encoded third-party caveat -// with the given condtion and root key. The thirdPartyPubKey key -// represents the public key of the third party we're encrypting -// the caveat for; the key is the public/private key pair of the party -// that's adding the caveat. -func encodeCaveatV1( - condition string, - rootKey []byte, - thirdPartyPubKey *PublicKey, - key *KeyPair, -) ([]byte, error) { - var nonce [NonceLen]byte - if _, err := rand.Read(nonce[:]); err != nil { - return nil, errgo.Notef(err, "cannot generate random number for nonce") - } - plain := caveatRecord{ - RootKey: rootKey, - Condition: condition, - } - plainData, err := json.Marshal(&plain) - if err != nil { - return nil, errgo.Notef(err, "cannot marshal %#v", &plain) - } - sealed := box.Seal(nil, plainData, &nonce, thirdPartyPubKey.boxKey(), key.Private.boxKey()) - id := caveatJSON{ - ThirdPartyPublicKey: thirdPartyPubKey, - FirstPartyPublicKey: &key.Public, - Nonce: nonce[:], - Id: base64.StdEncoding.EncodeToString(sealed), - } - data, err := json.Marshal(id) - if err != nil { - return nil, errgo.Notef(err, "cannot marshal %#v", id) - } - buf := make([]byte, base64.StdEncoding.EncodedLen(len(data))) - base64.StdEncoding.Encode(buf, data) - return buf, nil -} - -// encodeCaveatV2 creates a version 2 third-party caveat. -func encodeCaveatV2( - condition string, - rootKey []byte, - thirdPartyPubKey *PublicKey, - key *KeyPair, -) ([]byte, error) { - return encodeCaveatV2V3(Version2, condition, rootKey, thirdPartyPubKey, key, nil) -} - -// encodeCaveatV3 creates a version 3 third-party caveat. 
-func encodeCaveatV3( - condition string, - rootKey []byte, - thirdPartyPubKey *PublicKey, - key *KeyPair, - ns *checkers.Namespace, -) ([]byte, error) { - return encodeCaveatV2V3(Version3, condition, rootKey, thirdPartyPubKey, key, ns) -} - -const publicKeyPrefixLen = 4 - -// version3CaveatMinLen holds an underestimate of the -// minimum length of a version 3 caveat. -const version3CaveatMinLen = 1 + 4 + 32 + 24 + box.Overhead + 1 - -// encodeCaveatV3 creates a version 2 or version 3 third-party caveat. -// -// The format has the following packed binary fields (note -// that all fields up to and including the nonce are the same -// as the v2 format): -// -// version 2 or 3 [1 byte] -// first 4 bytes of third-party Curve25519 public key [4 bytes] -// first-party Curve25519 public key [32 bytes] -// nonce [24 bytes] -// encrypted secret part [rest of message] -// -// The encrypted part encrypts the following fields -// with box.Seal: -// -// version 2 or 3 [1 byte] -// length of root key [n: uvarint] -// root key [n bytes] -// length of encoded namespace [n: uvarint] (Version 3 only) -// encoded namespace [n bytes] (Version 3 only) -// condition [rest of encrypted part] -func encodeCaveatV2V3( - version Version, - condition string, - rootKey []byte, - thirdPartyPubKey *PublicKey, - key *KeyPair, - ns *checkers.Namespace, -) ([]byte, error) { - - var nsData []byte - if version >= Version3 { - data, err := ns.MarshalText() - if err != nil { - return nil, errgo.Mask(err) - } - nsData = data - } - // dataLen is our estimate of how long the data will be. - // As we always use append, this doesn't have to be strictly - // accurate but it's nice to avoid allocations. 
- dataLen := 0 + - 1 + // version - publicKeyPrefixLen + - KeyLen + - NonceLen + - box.Overhead + - 1 + // version - uvarintLen(uint64(len(rootKey))) + - len(rootKey) + - uvarintLen(uint64(len(nsData))) + - len(nsData) + - len(condition) - - var nonce [NonceLen]byte = uuidGen.Next() - - data := make([]byte, 0, dataLen) - data = append(data, byte(version)) - data = append(data, thirdPartyPubKey.Key[:publicKeyPrefixLen]...) - data = append(data, key.Public.Key[:]...) - data = append(data, nonce[:]...) - secret := encodeSecretPartV2V3(version, condition, rootKey, nsData) - return box.Seal(data, secret, &nonce, thirdPartyPubKey.boxKey(), key.Private.boxKey()), nil -} - -// encodeSecretPartV2V3 creates a version 2 or version 3 secret part of the third party -// caveat. The returned data is not encrypted. -// -// The format has the following packed binary fields: -// version 2 or 3 [1 byte] -// root key length [n: uvarint] -// root key [n bytes] -// namespace length [n: uvarint] (v3 only) -// namespace [n bytes] (v3 only) -// predicate [rest of message] -func encodeSecretPartV2V3(version Version, condition string, rootKey, nsData []byte) []byte { - data := make([]byte, 0, 1+binary.MaxVarintLen64+len(rootKey)+len(condition)) - data = append(data, byte(version)) // version - data = appendUvarint(data, uint64(len(rootKey))) - data = append(data, rootKey...) - if version >= Version3 { - data = appendUvarint(data, uint64(len(nsData))) - data = append(data, nsData...) - } - data = append(data, condition...) - return data -} - -// decodeCaveat attempts to decode caveat by decrypting the encrypted part -// using key. 
-func decodeCaveat(key *KeyPair, caveat []byte) (*ThirdPartyCaveatInfo, error) { - if len(caveat) == 0 { - return nil, errgo.New("empty third party caveat") - } - switch caveat[0] { - case byte(Version2): - return decodeCaveatV2V3(Version2, key, caveat) - case byte(Version3): - if len(caveat) < version3CaveatMinLen { - // If it has the version 3 caveat tag and it's too short, it's - // almost certainly an id, not an encrypted payload. - return nil, errgo.Newf("caveat id payload not provided for caveat id %q", caveat) - } - return decodeCaveatV2V3(Version3, key, caveat) - case 'e': - // 'e' will be the first byte if the caveatid is a base64 encoded JSON object. - return decodeCaveatV1(key, caveat) - default: - return nil, errgo.Newf("caveat has unsupported version %d", caveat[0]) - } -} - -// decodeCaveatV1 attempts to decode a base64 encoded JSON id. This -// encoding is nominally version -1. -func decodeCaveatV1(key *KeyPair, caveat []byte) (*ThirdPartyCaveatInfo, error) { - data := make([]byte, (3*len(caveat)+3)/4) - n, err := base64.StdEncoding.Decode(data, caveat) - if err != nil { - return nil, errgo.Notef(err, "cannot base64-decode caveat") - } - data = data[:n] - var wrapper caveatJSON - if err := json.Unmarshal(data, &wrapper); err != nil { - return nil, errgo.Notef(err, "cannot unmarshal caveat %q", data) - } - if !bytes.Equal(key.Public.Key[:], wrapper.ThirdPartyPublicKey.Key[:]) { - return nil, errgo.New("public key mismatch") - } - if wrapper.FirstPartyPublicKey == nil { - return nil, errgo.New("target service public key not specified") - } - // The encrypted string is base64 encoded in the JSON representation. 
- secret, err := base64.StdEncoding.DecodeString(wrapper.Id) - if err != nil { - return nil, errgo.Notef(err, "cannot base64-decode encrypted data") - } - var nonce [NonceLen]byte - if copy(nonce[:], wrapper.Nonce) < NonceLen { - return nil, errgo.Newf("nonce too short %x", wrapper.Nonce) - } - c, ok := box.Open(nil, secret, &nonce, wrapper.FirstPartyPublicKey.boxKey(), key.Private.boxKey()) - if !ok { - return nil, errgo.Newf("cannot decrypt caveat %#v", wrapper) - } - var record caveatRecord - if err := json.Unmarshal(c, &record); err != nil { - return nil, errgo.Notef(err, "cannot decode third party caveat record") - } - return &ThirdPartyCaveatInfo{ - Condition: []byte(record.Condition), - FirstPartyPublicKey: *wrapper.FirstPartyPublicKey, - ThirdPartyKeyPair: *key, - RootKey: record.RootKey, - Caveat: caveat, - Version: Version1, - Namespace: legacyNamespace(), - }, nil -} - -// decodeCaveatV2V3 decodes a version 2 or version 3 caveat. -func decodeCaveatV2V3(version Version, key *KeyPair, caveat []byte) (*ThirdPartyCaveatInfo, error) { - origCaveat := caveat - if len(caveat) < 1+publicKeyPrefixLen+KeyLen+NonceLen+box.Overhead { - return nil, errgo.New("caveat id too short") - } - caveat = caveat[1:] // skip version (already checked) - - publicKeyPrefix, caveat := caveat[:publicKeyPrefixLen], caveat[publicKeyPrefixLen:] - if !bytes.Equal(key.Public.Key[:publicKeyPrefixLen], publicKeyPrefix) { - return nil, errgo.New("public key mismatch") - } - - var firstPartyPub PublicKey - copy(firstPartyPub.Key[:], caveat[:KeyLen]) - caveat = caveat[KeyLen:] - - var nonce [NonceLen]byte - copy(nonce[:], caveat[:NonceLen]) - caveat = caveat[NonceLen:] - - data, ok := box.Open(nil, caveat, &nonce, firstPartyPub.boxKey(), key.Private.boxKey()) - if !ok { - return nil, errgo.Newf("cannot decrypt caveat id") - } - rootKey, ns, condition, err := decodeSecretPartV2V3(version, data) - if err != nil { - return nil, errgo.Notef(err, "invalid secret part") - } - return 
&ThirdPartyCaveatInfo{ - Condition: condition, - FirstPartyPublicKey: firstPartyPub, - ThirdPartyKeyPair: *key, - RootKey: rootKey, - Caveat: origCaveat, - Version: version, - Namespace: ns, - }, nil -} - -func decodeSecretPartV2V3(version Version, data []byte) (rootKey []byte, ns *checkers.Namespace, condition []byte, err error) { - fail := func(err error) ([]byte, *checkers.Namespace, []byte, error) { - return nil, nil, nil, err - } - if len(data) < 1 { - return fail(errgo.New("secret part too short")) - } - gotVersion, data := data[0], data[1:] - if version != Version(gotVersion) { - return fail(errgo.Newf("unexpected secret part version, got %d want %d", gotVersion, version)) - } - - l, n := binary.Uvarint(data) - if n <= 0 || uint64(n)+l > uint64(len(data)) { - return fail(errgo.Newf("invalid root key length")) - } - data = data[n:] - rootKey, data = data[:l], data[l:] - - if version >= Version3 { - var nsData []byte - var ns1 checkers.Namespace - - l, n = binary.Uvarint(data) - if n <= 0 || uint64(n)+l > uint64(len(data)) { - return fail(errgo.Newf("invalid namespace length")) - } - data = data[n:] - nsData, data = data[:l], data[l:] - if err := ns1.UnmarshalText(nsData); err != nil { - return fail(errgo.Notef(err, "cannot unmarshal namespace")) - } - ns = &ns1 - } else { - ns = legacyNamespace() - } - return rootKey, ns, data, nil -} - -// appendUvarint appends n to data encoded as a variable-length -// unsigned integer. -func appendUvarint(data []byte, n uint64) []byte { - // Ensure the capacity is sufficient. If our space calculations when - // allocating data were correct, this should never happen, - // but be defensive just in case. 
- for need := uvarintLen(n); cap(data)-len(data) < need; { - data1 := append(data[0:cap(data)], 0) - data = data1[0:len(data)] - } - nlen := binary.PutUvarint(data[len(data):cap(data)], n) - return data[0 : len(data)+nlen] -} - -// uvarintLen returns the number of bytes that n will require -// when encoded with binary.PutUvarint. -func uvarintLen(n uint64) int { - len := 1 - n >>= 7 - for ; n > 0; n >>= 7 { - len++ - } - return len -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/discharge.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/discharge.go deleted file mode 100644 index 4c7b0ae6..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/discharge.go +++ /dev/null @@ -1,282 +0,0 @@ -package bakery - -import ( - "context" - "crypto/rand" - "fmt" - "strconv" - "strings" - - "gopkg.in/errgo.v1" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers" -) - -// LocalThirdPartyCaveat returns a third-party caveat that, when added -// to a macaroon with AddCaveat, results in a caveat -// with the location "local", encrypted with the given public key. -// This can be automatically discharged by DischargeAllWithKey. -func LocalThirdPartyCaveat(key *PublicKey, version Version) checkers.Caveat { - var loc string - if version < Version2 { - loc = "local " + key.String() - } else { - loc = fmt.Sprintf("local %d %s", version, key) - } - return checkers.Caveat{ - Location: loc, - } -} - -// parseLocalLocation parses a local caveat location as generated by -// LocalThirdPartyCaveat. This is of the form: -// -// local -// -// where is the bakery version of the client that we're -// adding the local caveat for. -// -// It returns false if the location does not represent a local -// caveat location. 
-func parseLocalLocation(loc string) (ThirdPartyInfo, bool) { - if !strings.HasPrefix(loc, "local ") { - return ThirdPartyInfo{}, false - } - version := Version1 - fields := strings.Fields(loc) - fields = fields[1:] // Skip "local" - switch len(fields) { - case 2: - v, err := strconv.Atoi(fields[0]) - if err != nil { - return ThirdPartyInfo{}, false - } - version = Version(v) - fields = fields[1:] - fallthrough - case 1: - var key PublicKey - if err := key.UnmarshalText([]byte(fields[0])); err != nil { - return ThirdPartyInfo{}, false - } - return ThirdPartyInfo{ - PublicKey: key, - Version: version, - }, true - default: - return ThirdPartyInfo{}, false - } -} - -// DischargeParams holds parameters for a Discharge call. -type DischargeParams struct { - // Id holds the id to give to the discharge macaroon. - // If Caveat is empty, then the id also holds the - // encrypted third party caveat. - Id []byte - - // Caveat holds the encrypted third party caveat. If this - // is nil, Id will be used. - Caveat []byte - - // Key holds the key to use to decrypt the third party - // caveat information and to encrypt any additional - // third party caveats returned by the caveat checker. - Key *KeyPair - - // Checker is used to check the third party caveat, - // and may also return further caveats to be added to - // the discharge macaroon. - Checker ThirdPartyCaveatChecker - - // Locator is used to information on third parties - // referred to by third party caveats returned by the Checker. - Locator ThirdPartyLocator -} - -// Discharge creates a macaroon to discharges a third party caveat. -// The given parameters specify the caveat and how it should be checked/ -// -// The condition implicit in the caveat is checked for validity using p.Checker. If -// it is valid, a new macaroon is returned which discharges the caveat. -// -// The macaroon is created with a version derived from the version -// that was used to encode the id. 
-func Discharge(ctx context.Context, p DischargeParams) (*Macaroon, error) { - var caveatIdPrefix []byte - if p.Caveat == nil { - // The caveat information is encoded in the id itself. - p.Caveat = p.Id - } else { - // We've been given an explicit id, so when extra third party - // caveats are added, use that id as the prefix - // for any more ids. - caveatIdPrefix = p.Id - } - cavInfo, err := decodeCaveat(p.Key, p.Caveat) - if err != nil { - return nil, errgo.Notef(err, "discharger cannot decode caveat id") - } - cavInfo.Id = p.Id - // Note that we don't check the error - we allow the - // third party checker to see even caveats that we can't - // understand. - cond, arg, _ := checkers.ParseCaveat(string(cavInfo.Condition)) - - var caveats []checkers.Caveat - if cond == checkers.CondNeedDeclared { - cavInfo.Condition = []byte(arg) - caveats, err = checkNeedDeclared(ctx, cavInfo, p.Checker) - } else { - caveats, err = p.Checker.CheckThirdPartyCaveat(ctx, cavInfo) - } - if err != nil { - return nil, errgo.Mask(err, errgo.Any) - } - // Note that the discharge macaroon does not need to - // be stored persistently. Indeed, it would be a problem if - // we did, because then the macaroon could potentially be used - // for normal authorization with the third party. 
- m, err := NewMacaroon(cavInfo.RootKey, p.Id, "", cavInfo.Version, cavInfo.Namespace) - if err != nil { - return nil, errgo.Mask(err) - } - m.caveatIdPrefix = caveatIdPrefix - for _, cav := range caveats { - if err := m.AddCaveat(ctx, cav, p.Key, p.Locator); err != nil { - return nil, errgo.Notef(err, "could not add caveat") - } - } - return m, nil -} - -func checkNeedDeclared(ctx context.Context, cavInfo *ThirdPartyCaveatInfo, checker ThirdPartyCaveatChecker) ([]checkers.Caveat, error) { - arg := string(cavInfo.Condition) - i := strings.Index(arg, " ") - if i <= 0 { - return nil, errgo.Newf("need-declared caveat requires an argument, got %q", arg) - } - needDeclared := strings.Split(arg[0:i], ",") - for _, d := range needDeclared { - if d == "" { - return nil, errgo.New("need-declared caveat with empty required attribute") - } - } - if len(needDeclared) == 0 { - return nil, fmt.Errorf("need-declared caveat with no required attributes") - } - cavInfo.Condition = []byte(arg[i+1:]) - caveats, err := checker.CheckThirdPartyCaveat(ctx, cavInfo) - if err != nil { - return nil, errgo.Mask(err, errgo.Any) - } - declared := make(map[string]bool) - for _, cav := range caveats { - if cav.Location != "" { - continue - } - // Note that we ignore the error. We allow the service to - // generate caveats that we don't understand here. - cond, arg, _ := checkers.ParseCaveat(cav.Condition) - if cond != checkers.CondDeclared { - continue - } - parts := strings.SplitN(arg, " ", 2) - if len(parts) != 2 { - return nil, errgo.Newf("declared caveat has no value") - } - declared[parts[0]] = true - } - // Add empty declarations for everything mentioned in need-declared - // that was not actually declared. 
- for _, d := range needDeclared { - if !declared[d] { - caveats = append(caveats, checkers.DeclaredCaveat(d, "")) - } - } - return caveats, nil -} - -func randomBytes(n int) ([]byte, error) { - b := make([]byte, n) - _, err := rand.Read(b) - if err != nil { - return nil, fmt.Errorf("cannot generate %d random bytes: %v", n, err) - } - return b, nil -} - -// ThirdPartyCaveatInfo holds the information decoded from -// a third party caveat id. -type ThirdPartyCaveatInfo struct { - // Condition holds the third party condition to be discharged. - // This is the only field that most third party dischargers will - // need to consider. - Condition []byte - - // FirstPartyPublicKey holds the public key of the party - // that created the third party caveat. - FirstPartyPublicKey PublicKey - - // ThirdPartyKeyPair holds the key pair used to decrypt - // the caveat - the key pair of the discharging service. - ThirdPartyKeyPair KeyPair - - // RootKey holds the secret root key encoded by the caveat. - RootKey []byte - - // CaveatId holds the full encoded caveat id from which all - // the other fields are derived. - Caveat []byte - - // Version holds the version that was used to encode - // the caveat id. - Version Version - - // Id holds the id of the third party caveat (the id that - // the discharge macaroon should be given). This - // will differ from Caveat when the caveat information - // is encoded separately. - Id []byte - - // Namespace holds the namespace of the first party - // that created the macaroon, as encoded by the party - // that added the third party caveat. - Namespace *checkers.Namespace -} - -// ThirdPartyCaveatChecker holds a function that checks third party caveats -// for validity. If the caveat is valid, it returns a nil error and -// optionally a slice of extra caveats that will be added to the -// discharge macaroon. The caveatId parameter holds the still-encoded id -// of the caveat. 
-// -// If the caveat kind was not recognised, the checker should return an -// error with a ErrCaveatNotRecognized cause. -type ThirdPartyCaveatChecker interface { - CheckThirdPartyCaveat(ctx context.Context, info *ThirdPartyCaveatInfo) ([]checkers.Caveat, error) -} - -// ThirdPartyCaveatCheckerFunc implements ThirdPartyCaveatChecker by calling a function. -type ThirdPartyCaveatCheckerFunc func(context.Context, *ThirdPartyCaveatInfo) ([]checkers.Caveat, error) - -// CheckThirdPartyCaveat implements ThirdPartyCaveatChecker.CheckThirdPartyCaveat by calling -// the receiver with the given arguments -func (c ThirdPartyCaveatCheckerFunc) CheckThirdPartyCaveat(ctx context.Context, info *ThirdPartyCaveatInfo) ([]checkers.Caveat, error) { - return c(ctx, info) -} - -// FirstPartyCaveatChecker is used to check first party caveats -// for validity with respect to information in the provided context. -// -// If the caveat kind was not recognised, the checker should return -// ErrCaveatNotRecognized. -type FirstPartyCaveatChecker interface { - // CheckFirstPartyCaveat checks that the given caveat condition - // is valid with respect to the given context information. - CheckFirstPartyCaveat(ctx context.Context, caveat string) error - - // Namespace returns the namespace associated with the - // caveat checker. 
- Namespace() *checkers.Namespace -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/dischargeall.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/dischargeall.go deleted file mode 100644 index 9c117ba8..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/dischargeall.go +++ /dev/null @@ -1,56 +0,0 @@ -package bakery - -import ( - "context" - - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v2" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers" -) - -// DischargeAll gathers discharge macaroons for all the third party -// caveats in m (and any subsequent caveats required by those) using -// getDischarge to acquire each discharge macaroon. It returns a slice -// with m as the first element, followed by all the discharge macaroons. -// All the discharge macaroons will be bound to the primary macaroon. -// -// The getDischarge function is passed the caveat to be discharged; -// encryptedCaveat will be passed the external caveat payload found -// in m, if any. -func DischargeAll( - ctx context.Context, - m *Macaroon, - getDischarge func(ctx context.Context, cav macaroon.Caveat, encryptedCaveat []byte) (*Macaroon, error), -) (macaroon.Slice, error) { - return DischargeAllWithKey(ctx, m, getDischarge, nil) -} - -// DischargeAllWithKey is like DischargeAll except that the localKey -// parameter may optionally hold the key of the client, in which case it -// will be used to discharge any third party caveats with the special -// location "local". In this case, the caveat itself must be "true". This -// can be used be a server to ask a client to prove ownership of the -// private key. -// -// When localKey is nil, DischargeAllWithKey is exactly the same as -// DischargeAll. 
-func DischargeAllWithKey( - ctx context.Context, - m *Macaroon, - getDischarge func(ctx context.Context, cav macaroon.Caveat, encodedCaveat []byte) (*Macaroon, error), - localKey *KeyPair, -) (macaroon.Slice, error) { - discharges, err := Slice{m}.DischargeAll(ctx, getDischarge, localKey) - if err != nil { - return nil, errgo.Mask(err, errgo.Any) - } - return discharges.Bind(), nil -} - -var localDischargeChecker = ThirdPartyCaveatCheckerFunc(func(_ context.Context, info *ThirdPartyCaveatInfo) ([]checkers.Caveat, error) { - if string(info.Condition) != "true" { - return nil, checkers.ErrCaveatNotRecognized - } - return nil, nil -}) diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/doc.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/doc.go deleted file mode 100644 index f58f699d..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/doc.go +++ /dev/null @@ -1,88 +0,0 @@ -// The bakery package layers on top of the macaroon package, providing -// a transport and store-agnostic way of using macaroons to assert -// client capabilities. -// -// Summary -// -// The Bakery type is probably where you want to start. -// It encapsulates a Checker type, which performs checking -// of operations, and an Oven type, which encapsulates -// the actual details of the macaroon encoding conventions. -// -// Most other types and functions are designed either to plug -// into one of the above types (the various Authorizer -// implementations, for example), or to expose some independent -// functionality that's potentially useful (Discharge, for example). -// -// The rest of this introduction introduces some of the concepts -// used by the bakery package. -// -// Identity and entities -// -// An Identity represents some authenticated user (or agent), usually -// the client in a network protocol. 
An identity can be authenticated by -// an external identity server (with a third party macaroon caveat) or -// by locally provided information such as a username and password. -// -// The Checker type is not responsible for determining identity - that -// functionality is represented by the IdentityClient interface. -// -// The Checker uses identities to decide whether something should be -// allowed or not - the Authorizer interface is used to ask whether a -// given identity should be allowed to perform some set of operations. -// -// Operations -// -// An operation defines some requested action on an entity. For example, -// if file system server defines an entity for every file in the server, -// an operation to read a file might look like: -// -// Op{ -// Entity: "/foo", -// Action: "write", -// } -// -// The exact set of entities and actions is up to the caller, but should -// be kept stable over time because authorization tokens will contain -// these names. -// -// To authorize some request on behalf of a remote user, first find out -// what operations that request needs to perform. For example, if the -// user tries to delete a file, the entity might be the path to the -// file's directory and the action might be "write". It may often be -// possible to determine the operations required by a request without -// reference to anything external, when the request itself contains all -// the necessary information. -// -// The LoginOp operation is special - any macaroon associated with this -// operation is treated as a bearer of identity information. If two -// valid LoginOp macaroons are presented, only the first one will be -// used for identity. -// -// Authorization -// -// The Authorizer interface is responsible for determining whether a -// given authenticated identity is authorized to perform a set of -// operations. This is used when the macaroons provided to Auth are not -// sufficient to authorize the operations themselves. 
-// -// Capabilities -// -// A "capability" is represented by a macaroon that's associated with -// one or more operations, and grants the capability to perform all -// those operations. The AllowCapability method reports whether a -// capability is allowed. It takes into account any authenticated -// identity and any other capabilities provided. -// -// Third party caveats -// -// Sometimes authorization will only be granted if a third party caveat -// is discharged. This will happen when an IdentityClient or Authorizer -// returns a third party caveat. -// -// When this happens, a DischargeRequiredError will be returned -// containing the caveats and the operations required. The caller is -// responsible for creating a macaroon with those caveats associated -// with those operations and for passing that macaroon to the client to -// discharge. -package bakery diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/error.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/error.go deleted file mode 100644 index 1a059d59..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/error.go +++ /dev/null @@ -1,77 +0,0 @@ -package bakery - -import ( - "fmt" - - "gopkg.in/errgo.v1" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers" -) - -var ( - // ErrNotFound is returned by Store.Get implementations - // to signal that an id has not been found. - ErrNotFound = errgo.New("not found") - - // ErrPermissionDenied is returned from AuthChecker when - // permission has been denied. - ErrPermissionDenied = errgo.New("permission denied") -) - -// DischargeRequiredError is returned when authorization has failed and a -// discharged macaroon might fix it. -// -// A caller should grant the user the ability to authorize by minting a -// macaroon associated with Ops (see MacaroonStore.MacaroonIdInfo for -// how the associated operations are retrieved) and adding Caveats. 
If -// the user succeeds in discharging the caveats, the authorization will -// be granted. -type DischargeRequiredError struct { - // Message holds some reason why the authorization was denied. - // TODO this is insufficient (and maybe unnecessary) because we - // can have multiple errors. - Message string - - // Ops holds all the operations that were not authorized. - // If Ops contains a single LoginOp member, the macaroon - // should be treated as an login token. Login tokens (also - // known as authentication macaroons) usually have a longer - // life span than other macaroons. - Ops []Op - - // Caveats holds the caveats that must be added - // to macaroons that authorize the above operations. - Caveats []checkers.Caveat - - // ForAuthentication holds whether the macaroon holding - // the discharges will be used for authentication, and hence - // should have wider scope and longer lifetime. - // The bakery package never sets this field, but bakery/identchecker - // uses it. - ForAuthentication bool -} - -func (e *DischargeRequiredError) Error() string { - return "macaroon discharge required: " + e.Message -} - -func IsDischargeRequiredError(err error) bool { - _, ok := err.(*DischargeRequiredError) - return ok -} - -// VerificationError is used to signify that an error is because -// of a verification failure rather than because verification -// could not be done. 
-type VerificationError struct { - Reason error -} - -func (e *VerificationError) Error() string { - return fmt.Sprintf("verification failed: %v", e.Reason) -} - -func isVerificationError(err error) bool { - _, ok := errgo.Cause(err).(*VerificationError) - return ok -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/keys.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/keys.go deleted file mode 100644 index 7cffa9f3..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/keys.go +++ /dev/null @@ -1,219 +0,0 @@ -package bakery - -import ( - "context" - "crypto/rand" - "encoding/base64" - "encoding/json" - "strings" - "sync" - - "golang.org/x/crypto/curve25519" - "golang.org/x/crypto/nacl/box" - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v2" -) - -// KeyLen is the byte length of the Ed25519 public and private keys used for -// caveat id encryption. -const KeyLen = 32 - -// NonceLen is the byte length of the nonce values used for caveat id -// encryption. -const NonceLen = 24 - -// PublicKey is a 256-bit Ed25519 public key. -type PublicKey struct { - Key -} - -// PrivateKey is a 256-bit Ed25519 private key. -type PrivateKey struct { - Key -} - -// Public derives the public key from a private key. -func (k PrivateKey) Public() PublicKey { - var pub PublicKey - curve25519.ScalarBaseMult((*[32]byte)(&pub.Key), (*[32]byte)(&k.Key)) - return pub -} - -// Key is a 256-bit Ed25519 key. -type Key [KeyLen]byte - -// String returns the base64 representation of the key. -func (k Key) String() string { - return base64.StdEncoding.EncodeToString(k[:]) -} - -// MarshalBinary implements encoding.BinaryMarshaler.MarshalBinary. -func (k Key) MarshalBinary() ([]byte, error) { - return k[:], nil -} - -// isZero reports whether the key consists entirely of zeros. -func (k Key) isZero() bool { - return k == Key{} -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler.UnmarshalBinary. 
-func (k *Key) UnmarshalBinary(data []byte) error { - if len(data) != len(k) { - return errgo.Newf("wrong length for key, got %d want %d", len(data), len(k)) - } - copy(k[:], data) - return nil -} - -// MarshalText implements encoding.TextMarshaler.MarshalText. -func (k Key) MarshalText() ([]byte, error) { - data := make([]byte, base64.StdEncoding.EncodedLen(len(k))) - base64.StdEncoding.Encode(data, k[:]) - return data, nil -} - -// boxKey returns the box package's type for a key. -func (k Key) boxKey() *[KeyLen]byte { - return (*[KeyLen]byte)(&k) -} - -// UnmarshalText implements encoding.TextUnmarshaler.UnmarshalText. -func (k *Key) UnmarshalText(text []byte) error { - data, err := macaroon.Base64Decode(text) - if err != nil { - return errgo.Notef(err, "cannot decode base64 key") - } - if len(data) != len(k) { - return errgo.Newf("wrong length for key, got %d want %d", len(data), len(k)) - } - copy(k[:], data) - return nil -} - -// ThirdPartyInfo holds information on a given third party -// discharge service. -type ThirdPartyInfo struct { - // PublicKey holds the public key of the third party. - PublicKey PublicKey - - // Version holds latest the bakery protocol version supported - // by the discharger. - Version Version -} - -// ThirdPartyLocator is used to find information on third -// party discharge services. -type ThirdPartyLocator interface { - // ThirdPartyInfo returns information on the third - // party at the given location. It returns ErrNotFound if no match is found. - // This method must be safe to call concurrently. - ThirdPartyInfo(ctx context.Context, loc string) (ThirdPartyInfo, error) -} - -// ThirdPartyStore implements a simple ThirdPartyLocator. -// A trailing slash on locations is ignored. -type ThirdPartyStore struct { - mu sync.RWMutex - m map[string]ThirdPartyInfo -} - -// NewThirdPartyStore returns a new instance of ThirdPartyStore -// that stores locations in memory. 
-func NewThirdPartyStore() *ThirdPartyStore { - return &ThirdPartyStore{ - m: make(map[string]ThirdPartyInfo), - } -} - -// AddInfo associates the given information with the -// given location, ignoring any trailing slash. -// This method is OK to call concurrently with sThirdPartyInfo. -func (s *ThirdPartyStore) AddInfo(loc string, info ThirdPartyInfo) { - s.mu.Lock() - defer s.mu.Unlock() - s.m[canonicalLocation(loc)] = info -} - -func canonicalLocation(loc string) string { - return strings.TrimSuffix(loc, "/") -} - -// ThirdPartyInfo implements the ThirdPartyLocator interface. -func (s *ThirdPartyStore) ThirdPartyInfo(ctx context.Context, loc string) (ThirdPartyInfo, error) { - s.mu.RLock() - defer s.mu.RUnlock() - if info, ok := s.m[canonicalLocation(loc)]; ok { - return info, nil - } - return ThirdPartyInfo{}, ErrNotFound -} - -// KeyPair holds a public/private pair of keys. -type KeyPair struct { - Public PublicKey `json:"public"` - Private PrivateKey `json:"private"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (k *KeyPair) UnmarshalJSON(data []byte) error { - type keyPair KeyPair - if err := json.Unmarshal(data, (*keyPair)(k)); err != nil { - return err - } - return k.validate() -} - -// UnmarshalYAML implements yaml.Unmarshaler. -func (k *KeyPair) UnmarshalYAML(unmarshal func(interface{}) error) error { - type keyPair KeyPair - if err := unmarshal((*keyPair)(k)); err != nil { - return err - } - return k.validate() -} - -func (k *KeyPair) validate() error { - if k.Public.isZero() { - return errgo.Newf("missing public key") - } - if k.Private.isZero() { - return errgo.Newf("missing private key") - } - return nil -} - -// GenerateKey generates a new key pair. 
-func GenerateKey() (*KeyPair, error) { - var key KeyPair - pub, priv, err := box.GenerateKey(rand.Reader) - if err != nil { - return nil, err - } - key.Public = PublicKey{*pub} - key.Private = PrivateKey{*priv} - return &key, nil -} - -// MustGenerateKey is like GenerateKey but panics if GenerateKey returns -// an error - useful in tests. -func MustGenerateKey() *KeyPair { - key, err := GenerateKey() - if err != nil { - panic(errgo.Notef(err, "cannot generate key")) - } - return key -} - -// String implements the fmt.Stringer interface -// by returning the base64 representation of the -// public key part of key. -func (key *KeyPair) String() string { - return key.Public.String() -} - -type emptyLocator struct{} - -func (emptyLocator) ThirdPartyInfo(context.Context, string) (ThirdPartyInfo, error) { - return ThirdPartyInfo{}, ErrNotFound -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/logger.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/logger.go deleted file mode 100644 index acb5a1f5..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/logger.go +++ /dev/null @@ -1,28 +0,0 @@ -package bakery - -import ( - "context" -) - -// Logger is used by the bakery to log informational messages -// about bakery operations. -type Logger interface { - Infof(ctx context.Context, f string, args ...interface{}) - Debugf(ctx context.Context, f string, args ...interface{}) -} - -// DefaultLogger returns a Logger instance that does nothing. -// -// Deprecated: DefaultLogger exists for historical compatibility -// only. Previously it logged using github.com/juju/loggo. -func DefaultLogger(name string) Logger { - return nopLogger{} -} - -type nopLogger struct{} - -// Debugf implements Logger.Debugf. -func (nopLogger) Debugf(context.Context, string, ...interface{}) {} - -// Debugf implements Logger.Infof. 
-func (nopLogger) Infof(context.Context, string, ...interface{}) {} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/macaroon.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/macaroon.go deleted file mode 100644 index d5ad3b64..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/macaroon.go +++ /dev/null @@ -1,356 +0,0 @@ -package bakery - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/binary" - "encoding/json" - - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v2" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers" -) - -// legacyNamespace holds the standard namespace as used by -// pre-version3 macaroons. -func legacyNamespace() *checkers.Namespace { - ns := checkers.NewNamespace(nil) - ns.Register(checkers.StdNamespace, "") - return ns -} - -// Macaroon represents an undischarged macaroon along with its first -// party caveat namespace and associated third party caveat information -// which should be passed to the third party when discharging a caveat. -type Macaroon struct { - // m holds the underlying macaroon. - m *macaroon.Macaroon - - // version holds the version of the macaroon. - version Version - - // caveatData maps from a third party caveat id to its - // associated information, usually public-key encrypted with the - // third party's public key. - // - // If version is less than Version3, this will always be nil, - // because clients prior to that version do not support - // macaroon-external caveat ids. - caveatData map[string][]byte - - // namespace holds the first-party caveat namespace of the macaroon. - namespace *checkers.Namespace - - // caveatIdPrefix holds the prefix to use for the ids of any third - // party caveats created. This can be set when Discharge creates a - // discharge macaroon. - caveatIdPrefix []byte -} - -// NewLegacyMacaroon returns a new macaroon holding m. 
-// This should only be used when there's no alternative -// (for example when m has been unmarshaled -// from some alternative format). -func NewLegacyMacaroon(m *macaroon.Macaroon) (*Macaroon, error) { - v, err := bakeryVersion(m.Version()) - if err != nil { - return nil, errgo.Mask(err) - } - return &Macaroon{ - m: m, - version: v, - namespace: legacyNamespace(), - }, nil -} - -type macaroonJSON struct { - Macaroon *macaroon.Macaroon `json:"m"` - Version Version `json:"v"` - // Note: CaveatData is encoded using URL-base64-encoded keys - // because JSON cannot deal with arbitrary byte sequences - // in its strings, and URL-base64 values to match the - // standard macaroon encoding. - CaveatData map[string]string `json:"cdata,omitempty"` - Namespace *checkers.Namespace `json:"ns"` -} - -// Clone returns a copy of the macaroon. Note that the the new -// macaroon's namespace still points to the same underlying Namespace - -// copying the macaroon does not make a copy of the namespace. -func (m *Macaroon) Clone() *Macaroon { - m1 := *m - m1.m = m1.m.Clone() - m1.caveatData = make(map[string][]byte) - for id, data := range m.caveatData { - m1.caveatData[id] = data - } - return &m1 -} - -// MarshalJSON implements json.Marshaler by marshaling -// the macaroon into the original macaroon format if the -// version is earlier than Version3. 
-func (m *Macaroon) MarshalJSON() ([]byte, error) { - if m.version < Version3 { - if len(m.caveatData) > 0 { - return nil, errgo.Newf("cannot marshal pre-version3 macaroon with external caveat data") - } - return m.m.MarshalJSON() - } - caveatData := make(map[string]string) - for id, data := range m.caveatData { - caveatData[base64.RawURLEncoding.EncodeToString([]byte(id))] = base64.RawURLEncoding.EncodeToString(data) - } - return json.Marshal(macaroonJSON{ - Macaroon: m.m, - Version: m.version, - CaveatData: caveatData, - Namespace: m.namespace, - }) -} - -// UnmarshalJSON implements json.Unmarshaler by unmarshaling in a -// backwardly compatible way - if provided with a previous macaroon -// version, it will unmarshal that too. -func (m *Macaroon) UnmarshalJSON(data []byte) error { - // First try with new data format. - var m1 macaroonJSON - if err := json.Unmarshal(data, &m1); err != nil { - // If we get an unmarshal error, we won't be able - // to unmarshal into the old format either, as extra fields - // are ignored. - return errgo.Mask(err) - } - if m1.Macaroon == nil { - return m.unmarshalJSONOldFormat(data) - } - // We've got macaroon field - it's the new format. 
- if m1.Version < Version3 || m1.Version > LatestVersion { - return errgo.Newf("unexpected bakery macaroon version; got %d want %d", m1.Version, Version3) - } - if got, want := m1.Macaroon.Version(), MacaroonVersion(m1.Version); got != want { - return errgo.Newf("underlying macaroon has inconsistent version; got %d want %d", got, want) - } - caveatData := make(map[string][]byte) - for id64, data64 := range m1.CaveatData { - id, err := macaroon.Base64Decode([]byte(id64)) - if err != nil { - return errgo.Notef(err, "cannot decode caveat id") - } - data, err := macaroon.Base64Decode([]byte(data64)) - if err != nil { - return errgo.Notef(err, "cannot decode caveat") - } - caveatData[string(id)] = data - } - m.caveatData = caveatData - m.m = m1.Macaroon - m.namespace = m1.Namespace - // TODO should we allow version > LatestVersion here? - m.version = m1.Version - return nil -} - -// unmarshalJSONOldFormat unmarshals the data from an old format -// macaroon (without any external caveats or namespace). -func (m *Macaroon) unmarshalJSONOldFormat(data []byte) error { - // Try to unmarshal from the original format. - var m1 *macaroon.Macaroon - if err := json.Unmarshal(data, &m1); err != nil { - return errgo.Mask(err) - } - m2, err := NewLegacyMacaroon(m1) - if err != nil { - return errgo.Mask(err) - } - *m = *m2 - return nil -} - -// bakeryVersion returns a bakery version that corresponds to -// the macaroon version v. It is necessarily approximate because -// several bakery versions can correspond to a single macaroon -// version, so it's only of use when decoding legacy formats -// (in Macaroon.UnmarshalJSON). -// -// It will return an error if it doesn't recognize the version. -func bakeryVersion(v macaroon.Version) (Version, error) { - switch v { - case macaroon.V1: - // Use version 1 because we don't know of any existing - // version 0 clients. 
- return Version1, nil - case macaroon.V2: - // Note that this could also correspond to Version3, but - // this logic is explicitly for legacy versions. - return Version2, nil - default: - return 0, errgo.Newf("unknown macaroon version when legacy-unmarshaling bakery macaroon; got %d", v) - } -} - -// NewMacaroon creates and returns a new macaroon with the given root -// key, id and location. If the version is more than the latest known -// version, the latest known version will be used. The namespace is that -// of the service creating it. -func NewMacaroon(rootKey, id []byte, location string, version Version, ns *checkers.Namespace) (*Macaroon, error) { - if version > LatestVersion { - version = LatestVersion - } - m, err := macaroon.New(rootKey, id, location, MacaroonVersion(version)) - if err != nil { - return nil, errgo.Notef(err, "cannot create macaroon") - } - return &Macaroon{ - m: m, - version: version, - namespace: ns, - }, nil -} - -// M returns the underlying macaroon held within m. -func (m *Macaroon) M() *macaroon.Macaroon { - return m.m -} - -// Version returns the bakery version of the first party -// that created the macaroon. -func (m *Macaroon) Version() Version { - return m.version -} - -// Namespace returns the first party caveat namespace of the macaroon. -func (m *Macaroon) Namespace() *checkers.Namespace { - return m.namespace -} - -// AddCaveats is a convenienced method that calls m.AddCaveat for each -// caveat in cavs. -func (m *Macaroon) AddCaveats(ctx context.Context, cavs []checkers.Caveat, key *KeyPair, loc ThirdPartyLocator) error { - for _, cav := range cavs { - if err := m.AddCaveat(ctx, cav, key, loc); err != nil { - return errgo.Notef(err, "cannot add caveat %#v", cav) - } - } - return nil -} - -// AddCaveat adds a caveat to the given macaroon. -// -// If it's a third-party caveat, it encrypts it using the given key pair -// and by looking up the location using the given locator. 
If it's a -// first party cavat, key and loc are unused. -// -// As a special case, if the caveat's Location field has the prefix -// "local " the caveat is added as a client self-discharge caveat using -// the public key base64-encoded in the rest of the location. In this -// case, the Condition field must be empty. The resulting third-party -// caveat will encode the condition "true" encrypted with that public -// key. See LocalThirdPartyCaveat for a way of creating such caveats. -func (m *Macaroon) AddCaveat(ctx context.Context, cav checkers.Caveat, key *KeyPair, loc ThirdPartyLocator) error { - if cav.Location == "" { - if err := m.m.AddFirstPartyCaveat([]byte(m.namespace.ResolveCaveat(cav).Condition)); err != nil { - return errgo.Mask(err) - } - return nil - } - if key == nil { - return errgo.Newf("no private key to encrypt third party caveat") - } - var info ThirdPartyInfo - if localInfo, ok := parseLocalLocation(cav.Location); ok { - info = localInfo - cav.Location = "local" - if cav.Condition != "" { - return errgo.New("cannot specify caveat condition in local third-party caveat") - } - cav.Condition = "true" - } else { - if loc == nil { - return errgo.Newf("no locator when adding third party caveat") - } - var err error - info, err = loc.ThirdPartyInfo(ctx, cav.Location) - if err != nil { - return errgo.Notef(err, "cannot find public key for location %q", cav.Location) - } - } - rootKey, err := randomBytes(24) - if err != nil { - return errgo.Notef(err, "cannot generate third party secret") - } - // Use the least supported version to encode the caveat. 
- if m.version < info.Version { - info.Version = m.version - } - caveatInfo, err := encodeCaveat(cav.Condition, rootKey, info, key, m.namespace) - if err != nil { - return errgo.Notef(err, "cannot create third party caveat at %q", cav.Location) - } - var id []byte - if info.Version < Version3 { - // We're encoding for an earlier client or third party which does - // not understand bundled caveat info, so use the encoded - // caveat information as the caveat id. - id = caveatInfo - } else { - id = m.newCaveatId(m.caveatIdPrefix) - if m.caveatData == nil { - m.caveatData = make(map[string][]byte) - } - m.caveatData[string(id)] = caveatInfo - } - if err := m.m.AddThirdPartyCaveat(rootKey, id, cav.Location); err != nil { - return errgo.Notef(err, "cannot add third party caveat") - } - return nil -} - -// newCaveatId returns a third party caveat id that -// does not duplicate any third party caveat ids already inside m. -// -// If base is non-empty, it is used as the id prefix. -func (m *Macaroon) newCaveatId(base []byte) []byte { - var id []byte - if len(base) > 0 { - id = make([]byte, len(base), len(base)+binary.MaxVarintLen64) - copy(id, base) - } else { - id = make([]byte, 0, 1+binary.MaxVarintLen32) - // Add a version byte to the caveat id. Technically - // this is unnecessary as the caveat-decoding logic - // that looks at versions should never see this id, - // but if the caveat payload isn't provided with the - // payload, having this version gives a strong indication - // that the payload has been omitted so we can produce - // a better error for the user. - id = append(id, byte(Version3)) - } - - // Iterate through integers looking for one that isn't already used, - // starting from n so that if everyone is using this same algorithm, - // we'll only perform one iteration. 
- // - // Note that although this looks like an infinite loop, - // there's no way that it can run for more iterations - // than the total number of existing third party caveats, - // whatever their ids. - caveats := m.m.Caveats() -again: - for i := len(m.caveatData); ; i++ { - // We append a varint to the end of the id and assume that - // any client that's created the id that we're using as a base - // is using similar conventions - in the worst case they might - // end up with a duplicate third party caveat id and thus create - // a macaroon that cannot be discharged. - id1 := appendUvarint(id, uint64(i)) - for _, cav := range caveats { - if cav.VerificationId != nil && bytes.Equal(cav.Id, id1) { - continue again - } - } - return id1 - } -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/oven.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/oven.go deleted file mode 100644 index 83ce8908..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/oven.go +++ /dev/null @@ -1,359 +0,0 @@ -package bakery - -import ( - "bytes" - "context" - "encoding/base64" - "sort" - - "github.com/go-macaroon-bakery/macaroonpb" - "github.com/rogpeppe/fastuuid" - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v2" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers" -) - -// MacaroonVerifier verifies macaroons and returns the operations and -// caveats they're associated with. -type MacaroonVerifier interface { - // VerifyMacaroon verifies the signature of the given macaroon and returns - // information on its associated operations, and all the first party - // caveat conditions that need to be checked. - // - // This method should not check first party caveats itself. 
- // - // It should return a *VerificationError if the error occurred - // because the macaroon signature failed or the root key - // was not found - any other error will be treated as fatal - // by Checker and cause authorization to terminate. - VerifyMacaroon(ctx context.Context, ms macaroon.Slice) ([]Op, []string, error) -} - -var uuidGen = fastuuid.MustNewGenerator() - -// Oven bakes macaroons. They emerge sweet and delicious -// and ready for use in a Checker. -// -// All macaroons are associated with one or more operations (see -// the Op type) which define the capabilities of the macaroon. -// -// There is one special operation, "login" (defined by LoginOp) -// which grants the capability to speak for a particular user. -// The login capability will never be mixed with other capabilities. -// -// It is up to the caller to decide on semantics for other operations. -type Oven struct { - p OvenParams -} - -type OvenParams struct { - // Namespace holds the namespace to use when adding first party caveats. - // If this is nil, checkers.New(nil).Namespace will be used. - Namespace *checkers.Namespace - - // RootKeyStoreForEntity returns the macaroon storage to be - // used for root keys associated with macaroons created - // wth NewMacaroon. - // - // If this is nil, NewMemRootKeyStore will be used to create - // a new store to be used for all entities. - RootKeyStoreForOps func(ops []Op) RootKeyStore - - // Key holds the private key pair used to encrypt third party caveats. - // If it is nil, no third party caveats can be created. - Key *KeyPair - - // Location holds the location that will be associated with new macaroons - // (as returned by Macaroon.Location). - Location string - - // Locator is used to find out information on third parties when - // adding third party caveats. If this is nil, no non-local third - // party caveats can be added. 
- Locator ThirdPartyLocator - - // LegacyMacaroonOp holds the operation to associate with old - // macaroons that don't have associated operations. - // If this is empty, legacy macaroons will not be associated - // with any operations. - LegacyMacaroonOp Op - - // TODO max macaroon or macaroon id size? -} - -// NewOven returns a new oven using the given parameters. -func NewOven(p OvenParams) *Oven { - if p.Locator == nil { - p.Locator = emptyLocator{} - } - if p.RootKeyStoreForOps == nil { - store := NewMemRootKeyStore() - p.RootKeyStoreForOps = func(ops []Op) RootKeyStore { - return store - } - } - if p.Namespace == nil { - p.Namespace = checkers.New(nil).Namespace() - } - return &Oven{ - p: p, - } -} - -// VerifyMacaroon implements MacaroonVerifier.VerifyMacaroon, making Oven -// an instance of MacaroonVerifier. -// -// For macaroons minted with previous bakery versions, it always -// returns a single LoginOp operation. -func (o *Oven) VerifyMacaroon(ctx context.Context, ms macaroon.Slice) (ops []Op, conditions []string, err error) { - if len(ms) == 0 { - return nil, nil, errgo.Newf("no macaroons in slice") - } - storageId, ops, err := o.decodeMacaroonId(ms[0].Id()) - if err != nil { - return nil, nil, errgo.Mask(err) - } - rootKey, err := o.p.RootKeyStoreForOps(ops).Get(ctx, storageId) - if err != nil { - if errgo.Cause(err) != ErrNotFound { - return nil, nil, errgo.Notef(err, "cannot get macaroon") - } - // If the macaroon was not found, it is probably - // because it's been removed after time-expiry, - // so return a verification error. 
- return nil, nil, &VerificationError{ - Reason: errgo.Newf("macaroon not found in storage"), - } - } - conditions, err = ms[0].VerifySignature(rootKey, ms[1:]) - if err != nil { - return nil, nil, &VerificationError{ - Reason: errgo.Mask(err), - } - } - return ops, conditions, nil -} - -func (o *Oven) decodeMacaroonId(id []byte) (storageId []byte, ops []Op, err error) { - base64Decoded := false - if id[0] == 'A' { - // The first byte is not a version number and it's 'A', which is the - // base64 encoding of the top 6 bits (all zero) of the version number 2 or 3, - // so we assume that it's the base64 encoding of a new-style - // macaroon id, so we base64 decode it. - // - // Note that old-style ids always start with an ASCII character >= 4 - // (> 32 in fact) so this logic won't be triggered for those. - dec := make([]byte, base64.RawURLEncoding.DecodedLen(len(id))) - n, err := base64.RawURLEncoding.Decode(dec, id) - if err == nil { - // Set the id only on success - if it's a bad encoding, we'll get a not-found error - // which is fine because "not found" is a correct description of the issue - we - // can't find the root key for the given id. - id = dec[0:n] - base64Decoded = true - } - } - // Trim any extraneous information from the id before retrieving - // it from storage, including the UUID that's added when - // creating macaroons to make all macaroons unique even if - // they're using the same root key. - switch id[0] { - case byte(Version2): - // Skip the UUID at the start of the id. 
- storageId = id[1+16:] - case byte(Version3): - var id1 macaroonpb.MacaroonId - if err := id1.UnmarshalBinary(id[1:]); err != nil { - return nil, nil, errgo.Notef(err, "cannot unmarshal macaroon id") - } - if len(id1.Ops) == 0 || len(id1.Ops[0].Actions) == 0 { - return nil, nil, errgo.Newf("no operations found in macaroon") - } - ops = make([]Op, 0, len(id1.Ops)) - for _, op := range id1.Ops { - for _, action := range op.Actions { - ops = append(ops, Op{ - Entity: op.Entity, - Action: action, - }) - } - } - return id1.StorageId, ops, nil - } - if !base64Decoded && isLowerCaseHexChar(id[0]) { - // It's an old-style id, probably with a hyphenated UUID. - // so trim that off. - if i := bytes.LastIndexByte(id, '-'); i >= 0 { - storageId = id[0:i] - } - } - if op := o.p.LegacyMacaroonOp; op != (Op{}) { - ops = []Op{op} - } - return storageId, ops, nil -} - -// NewMacaroon takes a macaroon with the given version from the oven, associates it with the given operations -// and attaches the given caveats. There must be at least one operation specified. -func (o *Oven) NewMacaroon(ctx context.Context, version Version, caveats []checkers.Caveat, ops ...Op) (*Macaroon, error) { - if len(ops) == 0 { - return nil, errgo.Newf("cannot mint a macaroon associated with no operations") - } - ops = CanonicalOps(ops) - rootKey, storageId, err := o.p.RootKeyStoreForOps(ops).RootKey(ctx) - if err != nil { - return nil, errgo.Mask(err) - } - id, err := o.newMacaroonId(ctx, ops, storageId) - if err != nil { - return nil, errgo.Mask(err) - } - idBytesNoVersion, err := id.MarshalBinary() - if err != nil { - return nil, errgo.Mask(err) - } - idBytes := make([]byte, len(idBytesNoVersion)+1) - idBytes[0] = byte(LatestVersion) - // TODO We could use a proto.Buffer to avoid this copy. - copy(idBytes[1:], idBytesNoVersion) - - if MacaroonVersion(version) < macaroon.V2 { - // The old macaroon format required valid text for the macaroon id, - // so base64-encode it. 
- b64data := make([]byte, base64.RawURLEncoding.EncodedLen(len(idBytes))) - base64.RawURLEncoding.Encode(b64data, idBytes) - idBytes = b64data - } - m, err := NewMacaroon(rootKey, idBytes, o.p.Location, version, o.p.Namespace) - if err != nil { - return nil, errgo.Notef(err, "cannot create macaroon with version %v", version) - } - if err := o.AddCaveats(ctx, m, caveats); err != nil { - return nil, errgo.Mask(err) - } - return m, nil -} - -// AddCaveat adds a caveat to the given macaroon. -func (o *Oven) AddCaveat(ctx context.Context, m *Macaroon, cav checkers.Caveat) error { - return m.AddCaveat(ctx, cav, o.p.Key, o.p.Locator) -} - -// AddCaveats adds all the caveats to the given macaroon. -func (o *Oven) AddCaveats(ctx context.Context, m *Macaroon, caveats []checkers.Caveat) error { - return m.AddCaveats(ctx, caveats, o.p.Key, o.p.Locator) -} - -// Key returns the oven's private/public key par. -func (o *Oven) Key() *KeyPair { - return o.p.Key -} - -// Locator returns the third party locator that the -// oven was created with. -func (o *Oven) Locator() ThirdPartyLocator { - return o.p.Locator -} - -// CanonicalOps returns the given operations slice sorted -// with duplicates removed. -func CanonicalOps(ops []Op) []Op { - canonOps := opsByValue(ops) - needNewSlice := false - for i := 1; i < len(ops); i++ { - if !canonOps.Less(i-1, i) { - needNewSlice = true - break - } - } - if !needNewSlice { - return ops - } - canonOps = make([]Op, len(ops)) - copy(canonOps, ops) - sort.Sort(canonOps) - - // Note we know that there's at least one operation here - // because we'd have returned earlier if the slice was empty. 
- j := 0 - for _, op := range canonOps[1:] { - if op != canonOps[j] { - j++ - canonOps[j] = op - } - } - return canonOps[0 : j+1] -} - -func (o *Oven) newMacaroonId(ctx context.Context, ops []Op, storageId []byte) (*macaroonpb.MacaroonId, error) { - uuid := uuidGen.Next() - nonce := uuid[0:16] - return &macaroonpb.MacaroonId{ - Nonce: nonce, - StorageId: storageId, - Ops: macaroonIdOps(ops), - }, nil -} - -// macaroonIdOps returns operations suitable for serializing -// as part of an *macaroonpb.MacaroonId. It assumes that -// ops has been canonicalized and that there's at least -// one operation. -func macaroonIdOps(ops []Op) []*macaroonpb.Op { - idOps := make([]macaroonpb.Op, 0, len(ops)) - idOps = append(idOps, macaroonpb.Op{ - Entity: ops[0].Entity, - Actions: []string{ops[0].Action}, - }) - i := 0 - idOp := &idOps[0] - for _, op := range ops[1:] { - if op.Entity != idOp.Entity { - idOps = append(idOps, macaroonpb.Op{ - Entity: op.Entity, - Actions: []string{op.Action}, - }) - i++ - idOp = &idOps[i] - continue - } - if op.Action != idOp.Actions[len(idOp.Actions)-1] { - idOp.Actions = append(idOp.Actions, op.Action) - } - } - idOpPtrs := make([]*macaroonpb.Op, len(idOps)) - for i := range idOps { - idOpPtrs[i] = &idOps[i] - } - return idOpPtrs -} - -type opsByValue []Op - -func (o opsByValue) Less(i, j int) bool { - o0, o1 := o[i], o[j] - if o0.Entity != o1.Entity { - return o0.Entity < o1.Entity - } - return o0.Action < o1.Action -} - -func (o opsByValue) Swap(i, j int) { - o[i], o[j] = o[j], o[i] -} - -func (o opsByValue) Len() int { - return len(o) -} - -func isLowerCaseHexChar(c byte) bool { - switch { - case '0' <= c && c <= '9': - return true - case 'a' <= c && c <= 'f': - return true - } - return false -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/slice.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/slice.go deleted file mode 100644 index 20c5fcc7..00000000 --- 
a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/slice.go +++ /dev/null @@ -1,134 +0,0 @@ -package bakery - -import ( - "context" - "fmt" - "time" - - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v2" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers" -) - -// Slice holds a slice of unbound macaroons. -type Slice []*Macaroon - -// Bind prepares the macaroon slice for use in a request. This must be -// done before presenting the macaroons to a service for use as -// authorization tokens. The result will only be valid -// if s contains discharge macaroons for all third party -// caveats. -// -// All the macaroons in the returned slice will be copies -// of this in s, not references. -func (s Slice) Bind() macaroon.Slice { - if len(s) == 0 { - return nil - } - ms := make(macaroon.Slice, len(s)) - ms[0] = s[0].M().Clone() - rootSig := ms[0].Signature() - for i, m := range s[1:] { - m1 := m.M().Clone() - m1.Bind(rootSig) - ms[i+1] = m1 - } - return ms -} - -// Purge returns a new slice holding all macaroons in s -// that expire after the given time. -func (ms Slice) Purge(t time.Time) Slice { - ms1 := make(Slice, 0, len(ms)) - for i, m := range ms { - et, ok := checkers.ExpiryTime(m.Namespace(), m.M().Caveats()) - if !ok || et.After(t) { - ms1 = append(ms1, m) - } else if i == 0 { - // The primary macaroon has expired, so all its discharges - // have expired too. - // TODO purge all discharge macaroons when the macaroon - // containing their third-party caveat expires. - return nil - } - } - return ms1 -} - -// DischargeAll discharges all the third party caveats in the slice for -// which discharge macaroons are not already present, using getDischarge -// to acquire the discharge macaroons. It always returns the slice with -// any acquired discharge macaroons added, even on error. It returns an -// error if all the discharges could not be acquired. 
-// -// Note that this differs from DischargeAll in that it can be given several existing -// discharges, and that the resulting discharges are not bound to the primary, -// so it's still possible to add caveats and reacquire expired discharges -// without reacquiring the primary macaroon. -func (ms Slice) DischargeAll(ctx context.Context, getDischarge func(ctx context.Context, cav macaroon.Caveat, encryptedCaveat []byte) (*Macaroon, error), localKey *KeyPair) (Slice, error) { - if len(ms) == 0 { - return nil, errgo.Newf("no macaroons to discharge") - } - ms1 := make(Slice, len(ms)) - copy(ms1, ms) - // have holds the keys of all the macaroon ids in the slice. - type needCaveat struct { - // cav holds the caveat that needs discharge. - cav macaroon.Caveat - // encryptedCaveat holds encrypted caveat - // if it was held externally. - encryptedCaveat []byte - } - var need []needCaveat - have := make(map[string]bool) - for _, m := range ms[1:] { - have[string(m.M().Id())] = true - } - // addCaveats adds any required third party caveats to the need slice - // that aren't already present . - addCaveats := func(m *Macaroon) { - for _, cav := range m.M().Caveats() { - if len(cav.VerificationId) == 0 || have[string(cav.Id)] { - continue - } - need = append(need, needCaveat{ - cav: cav, - encryptedCaveat: m.caveatData[string(cav.Id)], - }) - } - } - for _, m := range ms { - addCaveats(m) - } - var errs []error - for len(need) > 0 { - cav := need[0] - need = need[1:] - var dm *Macaroon - var err error - if localKey != nil && cav.cav.Location == "local" { - // TODO use a small caveat id. 
- dm, err = Discharge(ctx, DischargeParams{ - Key: localKey, - Checker: localDischargeChecker, - Caveat: cav.encryptedCaveat, - Id: cav.cav.Id, - Locator: emptyLocator{}, - }) - } else { - dm, err = getDischarge(ctx, cav.cav, cav.encryptedCaveat) - } - if err != nil { - errs = append(errs, errgo.NoteMask(err, fmt.Sprintf("cannot get discharge from %q", cav.cav.Location), errgo.Any)) - continue - } - ms1 = append(ms1, dm) - addCaveats(dm) - } - if errs != nil { - // TODO log other errors? Return them all? - return ms1, errgo.Mask(errs[0], errgo.Any) - } - return ms1, nil -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/store.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/store.go deleted file mode 100644 index b8b19408..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/store.go +++ /dev/null @@ -1,63 +0,0 @@ -package bakery - -import ( - "context" - "sync" -) - -// RootKeyStore defines store for macaroon root keys. -type RootKeyStore interface { - // Get returns the root key for the given id. - // If the item is not there, it returns ErrNotFound. - Get(ctx context.Context, id []byte) ([]byte, error) - - // RootKey returns the root key to be used for making a new - // macaroon, and an id that can be used to look it up later with - // the Get method. - // - // Note that the root keys should remain available for as long - // as the macaroons using them are valid. - // - // Note that there is no need for it to return a new root key - // for every call - keys may be reused, although some key - // cycling is over time is advisable. - RootKey(ctx context.Context) (rootKey []byte, id []byte, err error) -} - -// NewMemRootKeyStore returns an implementation of -// Store that generates a single key and always -// returns that from RootKey. The same id ("0") is always -// used. 
-func NewMemRootKeyStore() RootKeyStore { - return new(memRootKeyStore) -} - -type memRootKeyStore struct { - mu sync.Mutex - key []byte -} - -// Get implements Store.Get. -func (s *memRootKeyStore) Get(_ context.Context, id []byte) ([]byte, error) { - s.mu.Lock() - defer s.mu.Unlock() - if len(id) != 1 || id[0] != '0' || s.key == nil { - return nil, ErrNotFound - } - return s.key, nil -} - -// RootKey implements Store.RootKey by always returning the same root -// key. -func (s *memRootKeyStore) RootKey(context.Context) (rootKey, id []byte, err error) { - s.mu.Lock() - defer s.mu.Unlock() - if s.key == nil { - newKey, err := randomBytes(24) - if err != nil { - return nil, nil, err - } - s.key = newKey - } - return s.key, []byte("0"), nil -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/version.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/version.go deleted file mode 100644 index 9f8e87bb..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/version.go +++ /dev/null @@ -1,30 +0,0 @@ -package bakery - -import "gopkg.in/macaroon.v2" - -// Version represents a version of the bakery protocol. -type Version int - -const ( - // In version 0, discharge-required errors use status 407 - Version0 Version = 0 - // In version 1, discharge-required errors use status 401. - Version1 Version = 1 - // In version 2, binary macaroons and caveat ids are supported. - Version2 Version = 2 - // In version 3, we support operations associated with macaroons - // and external third party caveats. - Version3 Version = 3 - LatestVersion = Version3 -) - -// MacaroonVersion returns the macaroon version that should -// be used with the given bakery Version. 
-func MacaroonVersion(v Version) macaroon.Version { - switch v { - case Version0, Version1: - return macaroon.V1 - default: - return macaroon.V2 - } -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/browser.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/browser.go deleted file mode 100644 index 8cc2e2a3..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/browser.go +++ /dev/null @@ -1,200 +0,0 @@ -package httpbakery - -import ( - "context" - "fmt" - "net/http" - "net/url" - "os" - - "github.com/juju/webbrowser" - "gopkg.in/errgo.v1" - "gopkg.in/httprequest.v1" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery" -) - -const WebBrowserInteractionKind = "browser-window" - -// WaitTokenResponse holds the response type -// returned, JSON-encoded, from the waitToken -// URL passed to SetBrowserInteraction. -type WaitTokenResponse struct { - Kind string `json:"kind"` - // Token holds the token value when it's well-formed utf-8 - Token string `json:"token,omitempty"` - // Token64 holds the token value, base64 encoded, when it's - // not well-formed utf-8. - Token64 string `json:"token64,omitempty"` -} - -// WaitResponse holds the type that should be returned -// by an HTTP response made to a LegacyWaitURL -// (See the ErrorInfo type). -type WaitResponse struct { - Macaroon *bakery.Macaroon -} - -// WebBrowserInteractionInfo holds the information -// expected in the browser-window interaction -// entry in an interaction-required error. -type WebBrowserInteractionInfo struct { - // VisitURL holds the URL to be visited in a web browser. - VisitURL string - - // WaitTokenURL holds a URL that will block on GET - // until the browser interaction has completed. - // On success, the response is expected to hold a waitTokenResponse - // in its body holding the token to be returned from the - // Interact method. 
- WaitTokenURL string -} - -var ( - _ Interactor = WebBrowserInteractor{} - _ LegacyInteractor = WebBrowserInteractor{} -) - -// OpenWebBrowser opens a web browser at the -// given URL. If the OS is not recognised, the URL -// is just printed to standard output. -func OpenWebBrowser(url *url.URL) error { - err := webbrowser.Open(url) - if err == nil { - fmt.Fprintf(os.Stderr, "Opening an authorization web page in your browser.\n") - fmt.Fprintf(os.Stderr, "If it does not open, please open this URL:\n%s\n", url) - return nil - } - if err == webbrowser.ErrNoBrowser { - fmt.Fprintf(os.Stderr, "Please open this URL in your browser to authorize:\n%s\n", url) - return nil - } - return err -} - -// SetWebBrowserInteraction adds information about web-browser-based -// interaction to the given error, which should be an -// interaction-required error that's about to be returned from a -// discharge request. -// -// The visitURL parameter holds a URL that should be visited by the user -// in a web browser; the waitTokenURL parameter holds a URL that can be -// long-polled to acquire the resulting discharge token. -// -// Use SetLegacyInteraction to add support for legacy clients -// that don't understand the newer InteractionMethods field. -func SetWebBrowserInteraction(e *Error, visitURL, waitTokenURL string) { - e.SetInteraction(WebBrowserInteractionKind, WebBrowserInteractionInfo{ - VisitURL: visitURL, - WaitTokenURL: waitTokenURL, - }) -} - -// SetLegacyInteraction adds information about web-browser-based -// interaction (or other kinds of legacy-protocol interaction) to the -// given error, which should be an interaction-required error that's -// about to be returned from a discharge request. -// -// The visitURL parameter holds a URL that should be visited by the user -// in a web browser (or with an "Accept: application/json" header to -// find out the set of legacy interaction methods). 
-// -// The waitURL parameter holds a URL that can be long-polled -// to acquire the discharge macaroon. -func SetLegacyInteraction(e *Error, visitURL, waitURL string) { - if e.Info == nil { - e.Info = new(ErrorInfo) - } - e.Info.LegacyVisitURL = visitURL - e.Info.LegacyWaitURL = waitURL -} - -// WebBrowserInteractor handls web-browser-based -// interaction-required errors by opening a web -// browser to allow the user to prove their -// credentials interactively. -// -// It implements the Interactor interface, so instances -// can be used with Client.AddInteractor. -type WebBrowserInteractor struct { - // OpenWebBrowser is used to visit a page in - // the user's web browser. If it's nil, the - // OpenWebBrowser function will be used. - OpenWebBrowser func(*url.URL) error -} - -// Kind implements Interactor.Kind. -func (WebBrowserInteractor) Kind() string { - return WebBrowserInteractionKind -} - -// Interact implements Interactor.Interact by opening a new web page. -func (wi WebBrowserInteractor) Interact(ctx context.Context, client *Client, location string, irErr *Error) (*DischargeToken, error) { - var p WebBrowserInteractionInfo - if err := irErr.InteractionMethod(wi.Kind(), &p); err != nil { - return nil, errgo.Mask(err, errgo.Is(ErrInteractionMethodNotFound)) - } - visitURL, err := relativeURL(location, p.VisitURL) - if err != nil { - return nil, errgo.Notef(err, "cannot make relative visit URL") - } - waitTokenURL, err := relativeURL(location, p.WaitTokenURL) - if err != nil { - return nil, errgo.Notef(err, "cannot make relative wait URL") - } - if err := wi.openWebBrowser(visitURL); err != nil { - return nil, errgo.Mask(err) - } - return waitForToken(ctx, client, waitTokenURL) -} - -func (wi WebBrowserInteractor) openWebBrowser(u *url.URL) error { - open := wi.OpenWebBrowser - if open == nil { - open = OpenWebBrowser - } - if err := open(u); err != nil { - return errgo.Mask(err) - } - return nil -} - -// waitForToken returns a token from a the waitToken URL 
-func waitForToken(ctx context.Context, client *Client, waitTokenURL *url.URL) (*DischargeToken, error) { - // TODO integrate this with waitForMacaroon somehow? - req, err := http.NewRequest("GET", waitTokenURL.String(), nil) - if err != nil { - return nil, errgo.Mask(err) - } - req = req.WithContext(ctx) - httpResp, err := client.Client.Do(req) - if err != nil { - return nil, errgo.Notef(err, "cannot get %q", waitTokenURL) - } - defer httpResp.Body.Close() - if httpResp.StatusCode != http.StatusOK { - err := unmarshalError(httpResp) - return nil, errgo.NoteMask(err, "cannot acquire discharge token", errgo.Any) - } - var resp WaitTokenResponse - if err := httprequest.UnmarshalJSONResponse(httpResp, &resp); err != nil { - return nil, errgo.Notef(err, "cannot unmarshal wait response") - } - tokenVal, err := maybeBase64Decode(resp.Token, resp.Token64) - if err != nil { - return nil, errgo.Notef(err, "bad discharge token") - } - // TODO check that kind and value are non-empty? - return &DischargeToken{ - Kind: resp.Kind, - Value: tokenVal, - }, nil -} - -// LegacyInteract implements LegacyInteractor by opening a web browser page. 
-func (wi WebBrowserInteractor) LegacyInteract(ctx context.Context, client *Client, location string, visitURL *url.URL) error { - if err := wi.openWebBrowser(visitURL); err != nil { - return errgo.Mask(err) - } - return nil -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/checkers.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/checkers.go deleted file mode 100644 index befc0e17..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/checkers.go +++ /dev/null @@ -1,157 +0,0 @@ -package httpbakery - -import ( - "context" - "net" - "net/http" - - "gopkg.in/errgo.v1" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers" -) - -type httpRequestKey struct{} - -// ContextWithRequest returns the context with information from the -// given request attached as context. This is used by the httpbakery -// checkers (see RegisterCheckers for details). -func ContextWithRequest(ctx context.Context, req *http.Request) context.Context { - return context.WithValue(ctx, httpRequestKey{}, req) -} - -func requestFromContext(ctx context.Context) *http.Request { - req, _ := ctx.Value(httpRequestKey{}).(*http.Request) - return req -} - -const ( - // CondClientIPAddr holds the first party caveat condition - // that checks a client's IP address. - CondClientIPAddr = "client-ip-addr" - - // CondClientOrigin holds the first party caveat condition that - // checks a client's origin header. - CondClientOrigin = "origin" -) - -// CheckersNamespace holds the URI of the HTTP checkers schema. -const CheckersNamespace = "http" - -var allCheckers = map[string]checkers.Func{ - CondClientIPAddr: ipAddrCheck, - CondClientOrigin: clientOriginCheck, -} - -// RegisterCheckers registers all the HTTP checkers with the given checker. -// Current checkers include: -// -// client-ip-addr -// -// The client-ip-addr caveat checks that the HTTP request has -// the given remote IP address. 
-// -// origin -// -// The origin caveat checks that the HTTP Origin header has -// the given value. -func RegisterCheckers(c *checkers.Checker) { - c.Namespace().Register(CheckersNamespace, "http") - for cond, check := range allCheckers { - c.Register(cond, CheckersNamespace, check) - } -} - -// NewChecker returns a new checker with the standard -// and HTTP checkers registered in it. -func NewChecker() *checkers.Checker { - c := checkers.New(nil) - RegisterCheckers(c) - return c -} - -// ipAddrCheck implements the IP client address checker -// for an HTTP request. -func ipAddrCheck(ctx context.Context, cond, args string) error { - req := requestFromContext(ctx) - if req == nil { - return errgo.Newf("no IP address found in context") - } - ip := net.ParseIP(args) - if ip == nil { - return errgo.Newf("cannot parse IP address in caveat") - } - if req.RemoteAddr == "" { - return errgo.Newf("client has no remote address") - } - reqIP, err := requestIPAddr(req) - if err != nil { - return errgo.Mask(err) - } - if !reqIP.Equal(ip) { - return errgo.Newf("client IP address mismatch, got %s", reqIP) - } - return nil -} - -// clientOriginCheck implements the Origin header checker -// for an HTTP request. -func clientOriginCheck(ctx context.Context, cond, args string) error { - req := requestFromContext(ctx) - if req == nil { - return errgo.Newf("no origin found in context") - } - // Note that web browsers may not provide the origin header when it's - // not a cross-site request with a GET method. There's nothing we - // can do about that, so just allow all requests with an empty origin. - if reqOrigin := req.Header.Get("Origin"); reqOrigin != "" && reqOrigin != args { - return errgo.Newf("request has invalid Origin header; got %q", reqOrigin) - } - return nil -} - -// SameClientIPAddrCaveat returns a caveat that will check that -// the remote IP address is the same as that in the given HTTP request. 
-func SameClientIPAddrCaveat(req *http.Request) checkers.Caveat { - if req.RemoteAddr == "" { - return checkers.ErrorCaveatf("client has no remote IP address") - } - ip, err := requestIPAddr(req) - if err != nil { - return checkers.ErrorCaveatf("%v", err) - } - return ClientIPAddrCaveat(ip) -} - -// ClientIPAddrCaveat returns a caveat that will check whether the -// client's IP address is as provided. -func ClientIPAddrCaveat(addr net.IP) checkers.Caveat { - if len(addr) != net.IPv4len && len(addr) != net.IPv6len { - return checkers.ErrorCaveatf("bad IP address %d", []byte(addr)) - } - return httpCaveat(CondClientIPAddr, addr.String()) -} - -// ClientOriginCaveat returns a caveat that will check whether the -// client's Origin header in its HTTP request is as provided. -func ClientOriginCaveat(origin string) checkers.Caveat { - return httpCaveat(CondClientOrigin, origin) -} - -func httpCaveat(cond, arg string) checkers.Caveat { - return checkers.Caveat{ - Condition: checkers.Condition(cond, arg), - Namespace: CheckersNamespace, - } -} - -func requestIPAddr(req *http.Request) (net.IP, error) { - reqHost, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - return nil, errgo.Newf("cannot parse host port in remote address: %v", err) - } - ip := net.ParseIP(reqHost) - if ip == nil { - return nil, errgo.Newf("invalid IP address in remote address %q", req.RemoteAddr) - } - return ip, nil -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/client.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/client.go deleted file mode 100644 index 212f57f0..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/client.go +++ /dev/null @@ -1,727 +0,0 @@ -package httpbakery - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "net/http" - "net/http/cookiejar" - "net/url" - "strings" - "time" - - "golang.org/x/net/publicsuffix" - "gopkg.in/errgo.v1" - "gopkg.in/httprequest.v1" - 
"gopkg.in/macaroon.v2" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery" - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers" -) - -var unmarshalError = httprequest.ErrorUnmarshaler(&Error{}) - -// maxDischargeRetries holds the maximum number of times that an HTTP -// request will be retried after a third party caveat has been successfully -// discharged. -const maxDischargeRetries = 3 - -// DischargeError represents the error when a third party discharge -// is refused by a server. -type DischargeError struct { - // Reason holds the underlying remote error that caused the - // discharge to fail. - Reason *Error -} - -func (e *DischargeError) Error() string { - return fmt.Sprintf("third party refused discharge: %v", e.Reason) -} - -// IsDischargeError reports whether err is a *DischargeError. -func IsDischargeError(err error) bool { - _, ok := err.(*DischargeError) - return ok -} - -// InteractionError wraps an error returned by a call to visitWebPage. -type InteractionError struct { - // Reason holds the actual error returned from visitWebPage. - Reason error -} - -func (e *InteractionError) Error() string { - return fmt.Sprintf("cannot start interactive session: %v", e.Reason) -} - -// IsInteractionError reports whether err is an *InteractionError. -func IsInteractionError(err error) bool { - _, ok := err.(*InteractionError) - return ok -} - -// NewHTTPClient returns an http.Client that ensures -// that headers are sent to the server even when the -// server redirects a GET request. The returned client -// also contains an empty in-memory cookie jar. 
-// -// See https://github.com/golang/go/issues/4677 -func NewHTTPClient() *http.Client { - c := *http.DefaultClient - c.CheckRedirect = func(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return fmt.Errorf("too many redirects") - } - if len(via) == 0 { - return nil - } - for attr, val := range via[0].Header { - if attr == "Cookie" { - // Cookies are added automatically anyway. - continue - } - if _, ok := req.Header[attr]; !ok { - req.Header[attr] = val - } - } - return nil - } - jar, err := cookiejar.New(&cookiejar.Options{ - PublicSuffixList: publicsuffix.List, - }) - if err != nil { - panic(err) - } - c.Jar = jar - return &c -} - -// Client holds the context for making HTTP requests -// that automatically acquire and discharge macaroons. -type Client struct { - // Client holds the HTTP client to use. It should have a cookie - // jar configured, and when redirecting it should preserve the - // headers (see NewHTTPClient). - *http.Client - - // InteractionMethods holds a slice of supported interaction - // methods, with preferred methods earlier in the slice. - // On receiving an interaction-required error when discharging, - // the Kind method of each Interactor in turn will be called - // and, if the error indicates that the interaction kind is supported, - // the Interact method will be called to complete the discharge. - InteractionMethods []Interactor - - // Key holds the client's key. If set, the client will try to - // discharge third party caveats with the special location - // "local" by using this key. See bakery.DischargeAllWithKey and - // bakery.LocalThirdPartyCaveat for more information - Key *bakery.KeyPair - - // Logger is used to log information about client activities. - // If it is nil, bakery.DefaultLogger("httpbakery") will be used. - Logger bakery.Logger -} - -// An Interactor represents a way of persuading a discharger -// that it should grant a discharge macaroon. 
-type Interactor interface { - // Kind returns the interaction method name. This corresponds to the - // key in the Error.InteractionMethods type. - Kind() string - - // Interact performs the interaction, and returns a token that can be - // used to acquire the discharge macaroon. The location provides - // the third party caveat location to make it possible to use - // relative URLs. - // - // If the given interaction isn't supported by the client for - // the given location, it may return an error with an - // ErrInteractionMethodNotFound cause which will cause the - // interactor to be ignored that time. - Interact(ctx context.Context, client *Client, location string, interactionRequiredErr *Error) (*DischargeToken, error) -} - -// DischargeToken holds a token that is intended -// to persuade a discharger to discharge a third -// party caveat. -type DischargeToken struct { - // Kind holds the kind of the token. By convention this - // matches the name of the interaction method used to - // obtain the token, but that's not required. - Kind string `json:"kind"` - - // Value holds the value of the token. - Value []byte `json:"value"` -} - -// LegacyInteractor may optionally be implemented by Interactor -// implementations that implement the legacy interaction-required -// error protocols. -type LegacyInteractor interface { - // LegacyInteract implements the "visit" half of a legacy discharge - // interaction. The "wait" half will be implemented by httpbakery. - // The location is the location specified by the third party - // caveat. - LegacyInteract(ctx context.Context, client *Client, location string, visitURL *url.URL) error -} - -// NewClient returns a new Client containing an HTTP client -// created with NewHTTPClient and leaves all other fields zero. -func NewClient() *Client { - return &Client{ - Client: NewHTTPClient(), - } -} - -// AddInteractor is a convenience method that appends the given -// interactor to c.InteractionMethods. 
-// For example, to enable web-browser interaction on -// a client c, do: -// -// c.AddInteractor(httpbakery.WebBrowserWindowInteractor) -func (c *Client) AddInteractor(i Interactor) { - c.InteractionMethods = append(c.InteractionMethods, i) -} - -// DischargeAll attempts to acquire discharge macaroons for all the -// third party caveats in m, and returns a slice containing all -// of them bound to m. -// -// If the discharge fails because a third party refuses to discharge a -// caveat, the returned error will have a cause of type *DischargeError. -// If the discharge fails because visitWebPage returns an error, -// the returned error will have a cause of *InteractionError. -// -// The returned macaroon slice will not be stored in the client -// cookie jar (see SetCookie if you need to do that). -func (c *Client) DischargeAll(ctx context.Context, m *bakery.Macaroon) (macaroon.Slice, error) { - return bakery.DischargeAllWithKey(ctx, m, c.AcquireDischarge, c.Key) -} - -// DischargeAllUnbound is like DischargeAll except that it does not -// bind the resulting macaroons. -func (c *Client) DischargeAllUnbound(ctx context.Context, ms bakery.Slice) (bakery.Slice, error) { - return ms.DischargeAll(ctx, c.AcquireDischarge, c.Key) -} - -// Do is like DoWithContext, except the context is automatically derived. -// If using go version 1.7 or later the context will be taken from the -// given request, otherwise context.Background() will be used. -func (c *Client) Do(req *http.Request) (*http.Response, error) { - return c.do(contextFromRequest(req), req, nil) -} - -// DoWithContext sends the given HTTP request and returns its response. -// If the request fails with a discharge-required error, any required -// discharge macaroons will be acquired, and the request will be repeated -// with those attached. -// -// If the required discharges were refused by a third party, an error -// with a *DischargeError cause will be returned. 
-// -// If interaction is required by the user, the client's InteractionMethods -// will be used to perform interaction. An error -// with a *InteractionError cause will be returned if this interaction -// fails. See WebBrowserWindowInteractor for a possible implementation of -// an Interactor for an interaction method. -// -// DoWithContext may add headers to req.Header. -func (c *Client) DoWithContext(ctx context.Context, req *http.Request) (*http.Response, error) { - return c.do(ctx, req, nil) -} - -// DoWithCustomError is like Do except it allows a client -// to specify a custom error function, getError, which is called on the -// HTTP response and may return a non-nil error if the response holds an -// error. If the cause of the returned error is a *Error value and its -// code is ErrDischargeRequired, the macaroon in its Info field will be -// discharged and the request will be repeated with the discharged -// macaroon. If getError returns nil, it should leave the response body -// unchanged. -// -// If getError is nil, DefaultGetError will be used. -// -// This method can be useful when dealing with APIs that -// return their errors in a format incompatible with Error, but the -// need for it should be avoided when creating new APIs, -// as it makes the endpoints less amenable to generic tools. 
-func (c *Client) DoWithCustomError(req *http.Request, getError func(resp *http.Response) error) (*http.Response, error) { - return c.do(contextFromRequest(req), req, getError) -} - -func (c *Client) do(ctx context.Context, req *http.Request, getError func(resp *http.Response) error) (*http.Response, error) { - c.logDebugf(ctx, "client do %s %s {", req.Method, req.URL) - resp, err := c.do1(ctx, req, getError) - c.logDebugf(ctx, "} -> error %#v", err) - return resp, err -} - -func (c *Client) do1(ctx context.Context, req *http.Request, getError func(resp *http.Response) error) (*http.Response, error) { - if getError == nil { - getError = DefaultGetError - } - if c.Client.Jar == nil { - return nil, errgo.New("no cookie jar supplied in HTTP client") - } - rreq, ok := newRetryableRequest(c.Client, req) - if !ok { - return nil, fmt.Errorf("request body is not seekable") - } - defer rreq.close() - - req.Header.Set(BakeryProtocolHeader, fmt.Sprint(bakery.LatestVersion)) - - // Make several attempts to do the request, because we might have - // to get through several layers of security. We only retry if - // we get a DischargeRequiredError and succeed in discharging - // the macaroon in it. 
- retry := 0 - for { - resp, err := c.do2(ctx, rreq, getError) - if err == nil || !isDischargeRequiredError(err) { - return resp, errgo.Mask(err, errgo.Any) - } - if retry++; retry > maxDischargeRetries { - return nil, errgo.NoteMask(err, fmt.Sprintf("too many (%d) discharge requests", retry-1), errgo.Any) - } - if err1 := c.HandleError(ctx, req.URL, err); err1 != nil { - return nil, errgo.Mask(err1, errgo.Any) - } - c.logDebugf(ctx, "discharge succeeded; retry %d", retry) - } -} - -func (c *Client) do2(ctx context.Context, rreq *retryableRequest, getError func(resp *http.Response) error) (*http.Response, error) { - httpResp, err := rreq.do(ctx) - if err != nil { - return nil, errgo.Mask(err, errgo.Any) - } - err = getError(httpResp) - if err == nil { - c.logInfof(ctx, "HTTP response OK (status %v)", httpResp.Status) - return httpResp, nil - } - httpResp.Body.Close() - return nil, errgo.Mask(err, errgo.Any) -} - -// HandleError tries to resolve the given error, which should be a -// response to the given URL, by discharging any macaroon contained in -// it. That is, if the error cause is an *Error and its code is -// ErrDischargeRequired, then it will try to discharge -// err.Info.Macaroon. If the discharge succeeds, the discharged macaroon -// will be saved to the client's cookie jar and ResolveError will return -// nil. -// -// For any other kind of error, the original error will be returned. 
-func (c *Client) HandleError(ctx context.Context, reqURL *url.URL, err error) error { - respErr, ok := errgo.Cause(err).(*Error) - if !ok { - return err - } - if respErr.Code != ErrDischargeRequired { - return respErr - } - if respErr.Info == nil || respErr.Info.Macaroon == nil { - return errgo.New("no macaroon found in discharge-required response") - } - mac := respErr.Info.Macaroon - macaroons, err := bakery.DischargeAllWithKey(ctx, mac, c.AcquireDischarge, c.Key) - if err != nil { - return errgo.Mask(err, errgo.Any) - } - var cookiePath string - if path := respErr.Info.MacaroonPath; path != "" { - relURL, err := parseURLPath(path) - if err != nil { - c.logInfof(ctx, "ignoring invalid path in discharge-required response: %v", err) - } else { - cookiePath = reqURL.ResolveReference(relURL).Path - } - } - // TODO use a namespace taken from the error response. - cookie, err := NewCookie(nil, macaroons) - if err != nil { - return errgo.Notef(err, "cannot make cookie") - } - cookie.Path = cookiePath - if name := respErr.Info.CookieNameSuffix; name != "" { - cookie.Name = "macaroon-" + name - } - c.Jar.SetCookies(reqURL, []*http.Cookie{cookie}) - return nil -} - -// DefaultGetError is the default error unmarshaler used by Client.Do. -func DefaultGetError(httpResp *http.Response) error { - if httpResp.StatusCode != http.StatusProxyAuthRequired && httpResp.StatusCode != http.StatusUnauthorized { - return nil - } - // Check for the new protocol discharge error. 
- if httpResp.StatusCode == http.StatusUnauthorized && httpResp.Header.Get("WWW-Authenticate") != "Macaroon" { - return nil - } - if httpResp.Header.Get("Content-Type") != "application/json" { - return nil - } - var resp Error - if err := json.NewDecoder(httpResp.Body).Decode(&resp); err != nil { - return fmt.Errorf("cannot unmarshal error response: %v", err) - } - return &resp -} - -func parseURLPath(path string) (*url.URL, error) { - u, err := url.Parse(path) - if err != nil { - return nil, errgo.Mask(err) - } - if u.Scheme != "" || - u.Opaque != "" || - u.User != nil || - u.Host != "" || - u.RawQuery != "" || - u.Fragment != "" { - return nil, errgo.Newf("URL path %q is not clean", path) - } - return u, nil -} - -// PermanentExpiryDuration holds the length of time a cookie -// holding a macaroon with no time-before caveat will be -// stored. -const PermanentExpiryDuration = 100 * 365 * 24 * time.Hour - -// NewCookie takes a slice of macaroons and returns them -// encoded as a cookie. The slice should contain a single primary -// macaroon in its first element, and any discharges after that. -// -// The given namespace specifies the first party caveat namespace, -// used for deriving the expiry time of the cookie. -func NewCookie(ns *checkers.Namespace, ms macaroon.Slice) (*http.Cookie, error) { - if len(ms) == 0 { - return nil, errgo.New("no macaroons in cookie") - } - // TODO(rog) marshal cookie as binary if version allows. - data, err := json.Marshal(ms) - if err != nil { - return nil, errgo.Notef(err, "cannot marshal macaroons") - } - cookie := &http.Cookie{ - Name: fmt.Sprintf("macaroon-%x", ms[0].Signature()), - Value: base64.StdEncoding.EncodeToString(data), - } - expires, found := checkers.MacaroonsExpiryTime(ns, ms) - if !found { - // The macaroon doesn't expire - use a very long expiry - // time for the cookie. 
- expires = time.Now().Add(PermanentExpiryDuration) - } else if expires.Sub(time.Now()) < time.Minute { - // The macaroon might have expired already, or it's - // got a short duration, so treat it as a session cookie - // by setting Expires to the zero time. - expires = time.Time{} - } - cookie.Expires = expires - // TODO(rog) other fields. - return cookie, nil -} - -// SetCookie sets a cookie for the given URL on the given cookie jar -// that will holds the given macaroon slice. The macaroon slice should -// contain a single primary macaroon in its first element, and any -// discharges after that. -// -// The given namespace specifies the first party caveat namespace, -// used for deriving the expiry time of the cookie. -func SetCookie(jar http.CookieJar, url *url.URL, ns *checkers.Namespace, ms macaroon.Slice) error { - cookie, err := NewCookie(ns, ms) - if err != nil { - return errgo.Mask(err) - } - jar.SetCookies(url, []*http.Cookie{cookie}) - return nil -} - -// MacaroonsForURL returns any macaroons associated with the -// given URL in the given cookie jar. -func MacaroonsForURL(jar http.CookieJar, u *url.URL) []macaroon.Slice { - return cookiesToMacaroons(jar.Cookies(u)) -} - -func appendURLElem(u, elem string) string { - if strings.HasSuffix(u, "/") { - return u + elem - } - return u + "/" + elem -} - -// AcquireDischarge acquires a discharge macaroon from the caveat location as an HTTP URL. -// It fits the getDischarge argument type required by bakery.DischargeAll. 
-func (c *Client) AcquireDischarge(ctx context.Context, cav macaroon.Caveat, payload []byte) (*bakery.Macaroon, error) { - m, err := c.acquireDischarge(ctx, cav, payload, nil) - if err == nil { - return m, nil - } - cause, ok := errgo.Cause(err).(*Error) - if !ok { - return nil, errgo.NoteMask(err, "cannot acquire discharge", IsInteractionError) - } - if cause.Code != ErrInteractionRequired { - return nil, &DischargeError{ - Reason: cause, - } - } - if cause.Info == nil { - return nil, errgo.Notef(err, "interaction-required response with no info") - } - // Make sure the location has a trailing slash so that - // the relative URL calculations work correctly even when - // cav.Location doesn't have a trailing slash. - loc := appendURLElem(cav.Location, "") - token, m, err := c.interact(ctx, loc, cause, payload) - if err != nil { - return nil, errgo.Mask(err, IsDischargeError, IsInteractionError) - } - if m != nil { - // We've acquired the macaroon directly via legacy interaction. - return m, nil - } - - // Try to acquire the discharge again, but this time with - // the token acquired by the interaction method. - m, err = c.acquireDischarge(ctx, cav, payload, token) - if err != nil { - return nil, errgo.Mask(err, IsDischargeError, IsInteractionError) - } - return m, nil -} - -// acquireDischarge is like AcquireDischarge except that it also -// takes a token acquired from an interaction method. 
-func (c *Client) acquireDischarge( - ctx context.Context, - cav macaroon.Caveat, - payload []byte, - token *DischargeToken, -) (*bakery.Macaroon, error) { - dclient := newDischargeClient(cav.Location, c) - var req dischargeRequest - req.Id, req.Id64 = maybeBase64Encode(cav.Id) - if token != nil { - req.Token, req.Token64 = maybeBase64Encode(token.Value) - req.TokenKind = token.Kind - } - req.Caveat = base64.RawURLEncoding.EncodeToString(payload) - resp, err := dclient.Discharge(ctx, &req) - if err == nil { - return resp.Macaroon, nil - } - return nil, errgo.Mask(err, errgo.Any) -} - -// interact gathers a macaroon by directing the user to interact with a -// web page. The irErr argument holds the interaction-required -// error response. -func (c *Client) interact(ctx context.Context, location string, irErr *Error, payload []byte) (*DischargeToken, *bakery.Macaroon, error) { - if len(c.InteractionMethods) == 0 { - return nil, nil, &InteractionError{ - Reason: errgo.New("interaction required but not possible"), - } - } - if irErr.Info.InteractionMethods == nil && irErr.Info.LegacyVisitURL != "" { - // It's an old-style error; deal with it differently. 
- m, err := c.legacyInteract(ctx, location, irErr) - if err != nil { - return nil, nil, errgo.Mask(err, IsDischargeError, IsInteractionError) - } - return nil, m, nil - } - for _, interactor := range c.InteractionMethods { - c.logDebugf(ctx, "checking interaction method %q", interactor.Kind()) - if _, ok := irErr.Info.InteractionMethods[interactor.Kind()]; ok { - c.logDebugf(ctx, "found possible interaction method %q", interactor.Kind()) - token, err := interactor.Interact(ctx, c, location, irErr) - if err != nil { - if errgo.Cause(err) == ErrInteractionMethodNotFound { - continue - } - return nil, nil, errgo.Mask(err, IsDischargeError, IsInteractionError) - } - if token == nil { - return nil, nil, errgo.New("interaction method returned an empty token") - } - return token, nil, nil - } else { - c.logDebugf(ctx, "interaction method %q not found in %#v", interactor.Kind(), irErr.Info.InteractionMethods) - } - } - return nil, nil, &InteractionError{ - Reason: errgo.Newf("no supported interaction method"), - } -} - -func (c *Client) legacyInteract(ctx context.Context, location string, irErr *Error) (*bakery.Macaroon, error) { - visitURL, err := relativeURL(location, irErr.Info.LegacyVisitURL) - if err != nil { - return nil, errgo.Mask(err) - } - waitURL, err := relativeURL(location, irErr.Info.LegacyWaitURL) - if err != nil { - return nil, errgo.Mask(err) - } - methodURLs := map[string]*url.URL{ - "interactive": visitURL, - } - if len(c.InteractionMethods) > 1 || c.InteractionMethods[0].Kind() != WebBrowserInteractionKind { - // We have several possible methods or we only support a non-window - // method, so we need to fetch the possible methods supported by the discharger. - methodURLs = legacyGetInteractionMethods(ctx, c.logger(), c, visitURL) - } - for _, interactor := range c.InteractionMethods { - kind := interactor.Kind() - if kind == WebBrowserInteractionKind { - // This is the old name for browser-window interaction. 
- kind = "interactive" - } - interactor, ok := interactor.(LegacyInteractor) - if !ok { - // Legacy interaction mode isn't supported. - continue - } - visitURL, ok := methodURLs[kind] - if !ok { - continue - } - visitURL, err := relativeURL(location, visitURL.String()) - if err != nil { - return nil, errgo.Mask(err) - } - if err := interactor.LegacyInteract(ctx, c, location, visitURL); err != nil { - return nil, &InteractionError{ - Reason: errgo.Mask(err, errgo.Any), - } - } - return waitForMacaroon(ctx, c, waitURL) - } - return nil, &InteractionError{ - Reason: errgo.Newf("no methods supported"), - } -} - -func (c *Client) logDebugf(ctx context.Context, f string, a ...interface{}) { - c.logger().Debugf(ctx, f, a...) -} - -func (c *Client) logInfof(ctx context.Context, f string, a ...interface{}) { - c.logger().Infof(ctx, f, a...) -} - -func (c *Client) logger() bakery.Logger { - if c.Logger != nil { - return c.Logger - } - return bakery.DefaultLogger("httpbakery") -} - -// waitForMacaroon returns a macaroon from a legacy wait endpoint. -func waitForMacaroon(ctx context.Context, client *Client, waitURL *url.URL) (*bakery.Macaroon, error) { - req, err := http.NewRequest("GET", waitURL.String(), nil) - if err != nil { - return nil, errgo.Mask(err) - } - req = req.WithContext(ctx) - httpResp, err := client.Client.Do(req) - if err != nil { - return nil, errgo.Notef(err, "cannot get %q", waitURL) - } - defer httpResp.Body.Close() - if httpResp.StatusCode != http.StatusOK { - err := unmarshalError(httpResp) - if err1, ok := err.(*Error); ok { - err = &DischargeError{ - Reason: err1, - } - } - return nil, errgo.NoteMask(err, "failed to acquire macaroon after waiting", errgo.Any) - } - var resp WaitResponse - if err := httprequest.UnmarshalJSONResponse(httpResp, &resp); err != nil { - return nil, errgo.Notef(err, "cannot unmarshal wait response") - } - return resp.Macaroon, nil -} - -// relativeURL returns newPath relative to an original URL. 
-func relativeURL(base, new string) (*url.URL, error) { - if new == "" { - return nil, errgo.Newf("empty URL") - } - baseURL, err := url.Parse(base) - if err != nil { - return nil, errgo.Notef(err, "cannot parse URL") - } - newURL, err := url.Parse(new) - if err != nil { - return nil, errgo.Notef(err, "cannot parse URL") - } - return baseURL.ResolveReference(newURL), nil -} - -// TODO(rog) move a lot of the code below into server.go, as it's -// much more about server side than client side. - -// MacaroonsHeader is the key of the HTTP header that can be used to provide a -// macaroon for request authorization. -const MacaroonsHeader = "Macaroons" - -// RequestMacaroons returns any collections of macaroons from the header and -// cookies found in the request. By convention, each slice will contain a -// primary macaroon followed by its discharges. -func RequestMacaroons(req *http.Request) []macaroon.Slice { - mss := cookiesToMacaroons(req.Cookies()) - for _, h := range req.Header[MacaroonsHeader] { - ms, err := decodeMacaroonSlice(h) - if err != nil { - // Ignore invalid macaroons. - continue - } - mss = append(mss, ms) - } - return mss -} - -// cookiesToMacaroons returns a slice of any macaroons found -// in the given slice of cookies. -func cookiesToMacaroons(cookies []*http.Cookie) []macaroon.Slice { - var mss []macaroon.Slice - for _, cookie := range cookies { - if !strings.HasPrefix(cookie.Name, "macaroon-") { - continue - } - ms, err := decodeMacaroonSlice(cookie.Value) - if err != nil { - // Ignore invalid macaroons. - continue - } - mss = append(mss, ms) - } - return mss -} - -// decodeMacaroonSlice decodes a base64-JSON-encoded slice of macaroons from -// the given string. -func decodeMacaroonSlice(value string) (macaroon.Slice, error) { - data, err := macaroon.Base64Decode([]byte(value)) - if err != nil { - return nil, errgo.NoteMask(err, "cannot base64-decode macaroons") - } - // TODO(rog) accept binary encoded macaroon cookies. 
- var ms macaroon.Slice - if err := json.Unmarshal(data, &ms); err != nil { - return nil, errgo.NoteMask(err, "cannot unmarshal macaroons") - } - return ms, nil -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_go17.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_go17.go deleted file mode 100644 index 6ae98530..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_go17.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.7 - -package httpbakery - -import ( - "context" - "net/http" -) - -func contextFromRequest(req *http.Request) context.Context { - return req.Context() -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_prego17.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_prego17.go deleted file mode 100644 index aecca0d3..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_prego17.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !go1.7 - -package httpbakery - -import ( - "context" - "net/http" -) - -func contextFromRequest(req *http.Request) context.Context { - return context.Background() -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/discharge.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/discharge.go deleted file mode 100644 index fa88bfa1..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/discharge.go +++ /dev/null @@ -1,367 +0,0 @@ -package httpbakery - -import ( - "context" - "encoding/base64" - "net/http" - "path" - "unicode/utf8" - - "github.com/julienschmidt/httprouter" - "gopkg.in/errgo.v1" - "gopkg.in/httprequest.v1" - "gopkg.in/macaroon.v2" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery" - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers" -) - -// ThirdPartyCaveatChecker is used to check third party caveats. 
-// This interface is deprecated and included only for backward -// compatibility; ThirdPartyCaveatCheckerP should be used instead. -type ThirdPartyCaveatChecker interface { - // CheckThirdPartyCaveat is like ThirdPartyCaveatCheckerP.CheckThirdPartyCaveat - // except that it uses separate arguments instead of a struct arg. - CheckThirdPartyCaveat(ctx context.Context, info *bakery.ThirdPartyCaveatInfo, req *http.Request, token *DischargeToken) ([]checkers.Caveat, error) -} - -// ThirdPartyCaveatCheckerP is used to check third party caveats. -// The "P" stands for "Params" - this was added after ThirdPartyCaveatChecker -// which can't be removed without breaking backwards compatibility. -type ThirdPartyCaveatCheckerP interface { - // CheckThirdPartyCaveat is used to check whether a client - // making the given request should be allowed a discharge for - // the p.Info.Condition. On success, the caveat will be discharged, - // with any returned caveats also added to the discharge - // macaroon. - // - // The p.Token field, if non-nil, is a token obtained from - // Interactor.Interact as the result of a discharge interaction - // after an interaction required error. - // - // Note than when used in the context of a discharge handler - // created by Discharger, any returned errors will be marshaled - // as documented in DischargeHandler.ErrorMapper. - CheckThirdPartyCaveat(ctx context.Context, p ThirdPartyCaveatCheckerParams) ([]checkers.Caveat, error) -} - -// ThirdPartyCaveatCheckerParams holds the parameters passed to -// CheckThirdPartyCaveatP. -type ThirdPartyCaveatCheckerParams struct { - // Caveat holds information about the caveat being discharged. - Caveat *bakery.ThirdPartyCaveatInfo - - // Token holds the discharge token provided by the client, if any. - Token *DischargeToken - - // Req holds the HTTP discharge request. - Request *http.Request - - // Response holds the HTTP response writer. Implementations - // must not call its WriteHeader or Write methods. 
- Response http.ResponseWriter -} - -// ThirdPartyCaveatCheckerFunc implements ThirdPartyCaveatChecker -// by calling a function. -type ThirdPartyCaveatCheckerFunc func(ctx context.Context, req *http.Request, info *bakery.ThirdPartyCaveatInfo, token *DischargeToken) ([]checkers.Caveat, error) - -func (f ThirdPartyCaveatCheckerFunc) CheckThirdPartyCaveat(ctx context.Context, info *bakery.ThirdPartyCaveatInfo, req *http.Request, token *DischargeToken) ([]checkers.Caveat, error) { - return f(ctx, req, info, token) -} - -// ThirdPartyCaveatCheckerPFunc implements ThirdPartyCaveatCheckerP -// by calling a function. -type ThirdPartyCaveatCheckerPFunc func(ctx context.Context, p ThirdPartyCaveatCheckerParams) ([]checkers.Caveat, error) - -func (f ThirdPartyCaveatCheckerPFunc) CheckThirdPartyCaveat(ctx context.Context, p ThirdPartyCaveatCheckerParams) ([]checkers.Caveat, error) { - return f(ctx, p) -} - -// newDischargeClient returns a discharge client that addresses the -// third party discharger at the given location URL and uses -// the given client to make HTTP requests. -// -// If client is nil, http.DefaultClient is used. -func newDischargeClient(location string, client httprequest.Doer) *dischargeClient { - if client == nil { - client = http.DefaultClient - } - return &dischargeClient{ - Client: httprequest.Client{ - BaseURL: location, - Doer: client, - UnmarshalError: unmarshalError, - }, - } -} - -// Discharger holds parameters for creating a new Discharger. -type DischargerParams struct { - // CheckerP is used to actually check the caveats. - // This will be used in preference to Checker. - CheckerP ThirdPartyCaveatCheckerP - - // Checker is used to actually check the caveats. - // This should be considered deprecated and will be ignored if CheckerP is set. - Checker ThirdPartyCaveatChecker - - // Key holds the key pair of the discharger. - Key *bakery.KeyPair - - // Locator is used to find public keys when adding - // third-party caveats on discharge macaroons. 
- // If this is nil, no third party caveats may be added. - Locator bakery.ThirdPartyLocator - - // ErrorToResponse is used to convert errors returned by the third - // party caveat checker to the form that will be JSON-marshaled - // on the wire. If zero, this defaults to ErrorToResponse. - // If set, it should handle errors that it does not understand - // by falling back to calling ErrorToResponse to ensure - // that the standard bakery errors are marshaled in the expected way. - ErrorToResponse func(ctx context.Context, err error) (int, interface{}) -} - -// Discharger represents a third-party caveat discharger. -// can discharge caveats in an HTTP server. -// -// The name space served by dischargers is as follows. -// All parameters can be provided either as URL attributes -// or form attributes. The result is always formatted as a JSON -// object. -// -// On failure, all endpoints return an error described by -// the Error type. -// -// POST /discharge -// params: -// id: all-UTF-8 third party caveat id -// id64: non-padded URL-base64 encoded caveat id -// macaroon-id: (optional) id to give to discharge macaroon (defaults to id) -// token: (optional) value of discharge token -// token64: (optional) base64-encoded value of discharge token. -// token-kind: (mandatory if token or token64 provided) discharge token kind. -// result on success (http.StatusOK): -// { -// Macaroon *macaroon.Macaroon -// } -// -// GET /publickey -// result: -// public key of service -// expiry time of key -type Discharger struct { - p DischargerParams -} - -// NewDischarger returns a new third-party caveat discharger -// using the given parameters. 
-func NewDischarger(p DischargerParams) *Discharger { - if p.ErrorToResponse == nil { - p.ErrorToResponse = ErrorToResponse - } - if p.Locator == nil { - p.Locator = emptyLocator{} - } - if p.CheckerP == nil { - p.CheckerP = ThirdPartyCaveatCheckerPFunc(func(ctx context.Context, cp ThirdPartyCaveatCheckerParams) ([]checkers.Caveat, error) { - return p.Checker.CheckThirdPartyCaveat(ctx, cp.Caveat, cp.Request, cp.Token) - }) - } - return &Discharger{ - p: p, - } -} - -type emptyLocator struct{} - -func (emptyLocator) ThirdPartyInfo(ctx context.Context, loc string) (bakery.ThirdPartyInfo, error) { - return bakery.ThirdPartyInfo{}, bakery.ErrNotFound -} - -// AddMuxHandlers adds handlers to the given ServeMux to provide -// a third-party caveat discharge service. -func (d *Discharger) AddMuxHandlers(mux *http.ServeMux, rootPath string) { - for _, h := range d.Handlers() { - // Note: this only works because we don't have any wildcard - // patterns in the discharger paths. - mux.Handle(path.Join(rootPath, h.Path), mkHTTPHandler(h.Handle)) - } -} - -// Handlers returns a slice of handlers that can handle a third-party -// caveat discharge service when added to an httprouter.Router. -// TODO provide some way of customizing the context so that -// ErrorToResponse can see a request-specific context. -func (d *Discharger) Handlers() []httprequest.Handler { - f := func(p httprequest.Params) (dischargeHandler, context.Context, error) { - return dischargeHandler{ - discharger: d, - }, p.Context, nil - } - srv := httprequest.Server{ - ErrorMapper: d.p.ErrorToResponse, - } - return srv.Handlers(f) -} - -//go:generate httprequest-generate-client github.com/go-macaroon-bakery/macaroon-bakery/v3-unstable/httpbakery dischargeHandler dischargeClient - -// dischargeHandler is the type used to define the httprequest handler -// methods for a discharger. 
-type dischargeHandler struct { - discharger *Discharger -} - -// dischargeRequest is a request to create a macaroon that discharges the -// supplied third-party caveat. Discharging caveats will normally be -// handled by the bakery it would be unusual to use this type directly in -// client software. -type dischargeRequest struct { - httprequest.Route `httprequest:"POST /discharge"` - Id string `httprequest:"id,form,omitempty"` - Id64 string `httprequest:"id64,form,omitempty"` - Caveat string `httprequest:"caveat64,form,omitempty"` - Token string `httprequest:"token,form,omitempty"` - Token64 string `httprequest:"token64,form,omitempty"` - TokenKind string `httprequest:"token-kind,form,omitempty"` -} - -// dischargeResponse contains the response from a /discharge POST request. -type dischargeResponse struct { - Macaroon *bakery.Macaroon `json:",omitempty"` -} - -// Discharge discharges a third party caveat. -func (h dischargeHandler) Discharge(p httprequest.Params, r *dischargeRequest) (*dischargeResponse, error) { - id, err := maybeBase64Decode(r.Id, r.Id64) - if err != nil { - return nil, errgo.Notef(err, "bad caveat id") - } - var caveat []byte - if r.Caveat != "" { - // Note that it's important that when r.Caveat is empty, - // we leave DischargeParams.Caveat as nil (Base64Decode - // always returns a non-nil byte slice). 
- caveat1, err := macaroon.Base64Decode([]byte(r.Caveat)) - if err != nil { - return nil, errgo.Notef(err, "bad base64-encoded caveat: %v", err) - } - caveat = caveat1 - } - tokenVal, err := maybeBase64Decode(r.Token, r.Token64) - if err != nil { - return nil, errgo.Notef(err, "bad discharge token") - } - var token *DischargeToken - if len(tokenVal) != 0 { - if r.TokenKind == "" { - return nil, errgo.Notef(err, "discharge token provided without token kind") - } - token = &DischargeToken{ - Kind: r.TokenKind, - Value: tokenVal, - } - } - m, err := bakery.Discharge(p.Context, bakery.DischargeParams{ - Id: id, - Caveat: caveat, - Key: h.discharger.p.Key, - Checker: bakery.ThirdPartyCaveatCheckerFunc( - func(ctx context.Context, cav *bakery.ThirdPartyCaveatInfo) ([]checkers.Caveat, error) { - return h.discharger.p.CheckerP.CheckThirdPartyCaveat(ctx, ThirdPartyCaveatCheckerParams{ - Caveat: cav, - Request: p.Request, - Response: p.Response, - Token: token, - }) - }, - ), - Locator: h.discharger.p.Locator, - }) - if err != nil { - return nil, errgo.NoteMask(err, "cannot discharge", errgo.Any) - } - return &dischargeResponse{m}, nil -} - -// publicKeyRequest specifies the /publickey endpoint. -type publicKeyRequest struct { - httprequest.Route `httprequest:"GET /publickey"` -} - -// publicKeyResponse is the response to a /publickey GET request. -type publicKeyResponse struct { - PublicKey *bakery.PublicKey -} - -// dischargeInfoRequest specifies the /discharge/info endpoint. -type dischargeInfoRequest struct { - httprequest.Route `httprequest:"GET /discharge/info"` -} - -// dischargeInfoResponse is the response to a /discharge/info GET -// request. -type dischargeInfoResponse struct { - PublicKey *bakery.PublicKey - Version bakery.Version -} - -// PublicKey returns the public key of the discharge service. 
-func (h dischargeHandler) PublicKey(*publicKeyRequest) (publicKeyResponse, error) { - return publicKeyResponse{ - PublicKey: &h.discharger.p.Key.Public, - }, nil -} - -// DischargeInfo returns information on the discharger. -func (h dischargeHandler) DischargeInfo(*dischargeInfoRequest) (dischargeInfoResponse, error) { - return dischargeInfoResponse{ - PublicKey: &h.discharger.p.Key.Public, - Version: bakery.LatestVersion, - }, nil -} - -// mkHTTPHandler converts an httprouter handler to an http.Handler, -// assuming that the httprouter handler has no wildcard path -// parameters. -func mkHTTPHandler(h httprouter.Handle) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - h(w, req, nil) - }) -} - -// maybeBase64Encode encodes b as is if it's -// OK to be passed as a URL form parameter, -// or encoded as base64 otherwise. -func maybeBase64Encode(b []byte) (s, s64 string) { - if utf8.Valid(b) { - valid := true - for _, c := range b { - if c < 32 || c == 127 { - valid = false - break - } - } - if valid { - return string(b), "" - } - } - return "", base64.RawURLEncoding.EncodeToString(b) -} - -// maybeBase64Decode implements the inverse of maybeBase64Encode. -func maybeBase64Decode(s, s64 string) ([]byte, error) { - if s64 != "" { - data, err := macaroon.Base64Decode([]byte(s64)) - if err != nil { - return nil, errgo.Mask(err) - } - if len(data) == 0 { - return nil, nil - } - return data, nil - } - return []byte(s), nil -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/dischargeclient_generated.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/dischargeclient_generated.go deleted file mode 100644 index 3a738f38..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/dischargeclient_generated.go +++ /dev/null @@ -1,35 +0,0 @@ -// The code in this file was automatically generated by running httprequest-generate-client. 
-// DO NOT EDIT - -package httpbakery - -import ( - "context" - - "gopkg.in/httprequest.v1" -) - -type dischargeClient struct { - Client httprequest.Client -} - -// Discharge discharges a third party caveat. -func (c *dischargeClient) Discharge(ctx context.Context, p *dischargeRequest) (*dischargeResponse, error) { - var r *dischargeResponse - err := c.Client.Call(ctx, p, &r) - return r, err -} - -// DischargeInfo returns information on the discharger. -func (c *dischargeClient) DischargeInfo(ctx context.Context, p *dischargeInfoRequest) (dischargeInfoResponse, error) { - var r dischargeInfoResponse - err := c.Client.Call(ctx, p, &r) - return r, err -} - -// PublicKey returns the public key of the discharge service. -func (c *dischargeClient) PublicKey(ctx context.Context, p *publicKeyRequest) (publicKeyResponse, error) { - var r publicKeyResponse - err := c.Client.Call(ctx, p, &r) - return r, err -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/error.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/error.go deleted file mode 100644 index 0ccc0794..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/error.go +++ /dev/null @@ -1,359 +0,0 @@ -package httpbakery - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "strconv" - - "gopkg.in/errgo.v1" - "gopkg.in/httprequest.v1" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery" - "github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil" -) - -// ErrorCode holds an error code that classifies -// an error returned from a bakery HTTP handler. 
-type ErrorCode string - -func (e ErrorCode) Error() string { - return string(e) -} - -func (e ErrorCode) ErrorCode() ErrorCode { - return e -} - -const ( - ErrBadRequest = ErrorCode("bad request") - ErrDischargeRequired = ErrorCode("macaroon discharge required") - ErrInteractionRequired = ErrorCode("interaction required") - ErrInteractionMethodNotFound = ErrorCode("discharger does not provide an supported interaction method") - ErrPermissionDenied = ErrorCode("permission denied") -) - -var httpReqServer = httprequest.Server{ - ErrorMapper: ErrorToResponse, -} - -// WriteError writes the given bakery error to w. -func WriteError(ctx context.Context, w http.ResponseWriter, err error) { - httpReqServer.WriteError(ctx, w, err) -} - -// Error holds the type of a response from an httpbakery HTTP request, -// marshaled as JSON. -// -// Note: Do not construct Error values with ErrDischargeRequired or -// ErrInteractionRequired codes directly - use the -// NewDischargeRequiredError or NewInteractionRequiredError -// functions instead. -type Error struct { - Code ErrorCode `json:",omitempty"` - Message string `json:",omitempty"` - Info *ErrorInfo `json:",omitempty"` - - // version holds the protocol version that was used - // to create the error (see NewDischargeRequiredError). - version bakery.Version -} - -// ErrorInfo holds additional information provided -// by an error. -type ErrorInfo struct { - // Macaroon may hold a macaroon that, when - // discharged, may allow access to a service. - // This field is associated with the ErrDischargeRequired - // error code. - Macaroon *bakery.Macaroon `json:",omitempty"` - - // MacaroonPath holds the URL path to be associated - // with the macaroon. The macaroon is potentially - // valid for all URLs under the given path. - // If it is empty, the macaroon will be associated with - // the original URL from which the error was returned. 
- MacaroonPath string `json:",omitempty"` - - // CookieNameSuffix holds the desired cookie name suffix to be - // associated with the macaroon. The actual name used will be - // ("macaroon-" + CookieName). Clients may ignore this field - - // older clients will always use ("macaroon-" + - // macaroon.Signature() in hex). - CookieNameSuffix string `json:",omitempty"` - - // The following fields are associated with the - // ErrInteractionRequired error code. - - // InteractionMethods holds the set of methods that the - // third party supports for completing the discharge. - // See InteractionMethod for a more convenient - // accessor method. - InteractionMethods map[string]*json.RawMessage `json:",omitempty"` - - // LegacyVisitURL holds a URL that the client should visit - // in a web browser to authenticate themselves. - // This is deprecated - it is superceded by the InteractionMethods - // field. - LegacyVisitURL string `json:"VisitURL,omitempty"` - - // LegacyWaitURL holds a URL that the client should visit - // to acquire the discharge macaroon. A GET on - // this URL will block until the client has authenticated, - // and then it will return the discharge macaroon. - // This is deprecated - it is superceded by the InteractionMethods - // field. - LegacyWaitURL string `json:"WaitURL,omitempty"` -} - -// SetInteraction sets the information for a particular -// interaction kind to v. The error should be an interaction-required -// error. This method will panic if v cannot be JSON-marshaled. -// It is expected that interaction implementations will -// implement type-safe wrappers for this method, -// so you should not need to call it directly. 
-func (e *Error) SetInteraction(kind string, v interface{}) { - if e.Info == nil { - e.Info = new(ErrorInfo) - } - if e.Info.InteractionMethods == nil { - e.Info.InteractionMethods = make(map[string]*json.RawMessage) - } - data, err := json.Marshal(v) - if err != nil { - panic(err) - } - m := json.RawMessage(data) - e.Info.InteractionMethods[kind] = &m -} - -// InteractionMethod checks whether the error is an InteractionRequired error -// that implements the method with the given name, and JSON-unmarshals the -// method-specific data into x. -func (e *Error) InteractionMethod(kind string, x interface{}) error { - if e.Info == nil || e.Code != ErrInteractionRequired { - return errgo.Newf("not an interaction-required error (code %v)", e.Code) - } - entry := e.Info.InteractionMethods[kind] - if entry == nil { - return errgo.WithCausef(nil, ErrInteractionMethodNotFound, "interaction method %q not found", kind) - } - if err := json.Unmarshal(*entry, x); err != nil { - return errgo.Notef(err, "cannot unmarshal data for interaction method %q", kind) - } - return nil -} - -func (e *Error) Error() string { - return e.Message -} - -func (e *Error) ErrorCode() ErrorCode { - return e.Code -} - -// ErrorInfo returns additional information -// about the error. -// TODO return interface{} here? -func (e *Error) ErrorInfo() *ErrorInfo { - return e.Info -} - -// ErrorToResponse returns the HTTP status and an error body to be -// marshaled as JSON for the given error. This allows a third party -// package to integrate bakery errors into their error responses when -// they encounter an error with a *bakery.Error cause. 
-func ErrorToResponse(ctx context.Context, err error) (int, interface{}) { - errorBody := errorResponseBody(err) - var body interface{} = errorBody - status := http.StatusInternalServerError - switch errorBody.Code { - case ErrBadRequest: - status = http.StatusBadRequest - case ErrPermissionDenied: - status = http.StatusUnauthorized - case ErrDischargeRequired, ErrInteractionRequired: - switch errorBody.version { - case bakery.Version0: - status = http.StatusProxyAuthRequired - case bakery.Version1, bakery.Version2, bakery.Version3: - status = http.StatusUnauthorized - body = httprequest.CustomHeader{ - Body: body, - SetHeaderFunc: setAuthenticateHeader, - } - default: - panic(fmt.Sprintf("out of range version number %v", errorBody.version)) - } - } - return status, body -} - -func setAuthenticateHeader(h http.Header) { - h.Set("WWW-Authenticate", "Macaroon") -} - -type errorInfoer interface { - ErrorInfo() *ErrorInfo -} - -type errorCoder interface { - ErrorCode() ErrorCode -} - -// errorResponse returns an appropriate error -// response for the provided error. -func errorResponseBody(err error) *Error { - var errResp Error - cause := errgo.Cause(err) - if cause, ok := cause.(*Error); ok { - // It's an Error already. Preserve the wrapped - // error message but copy everything else. - errResp = *cause - errResp.Message = err.Error() - return &errResp - } - - // It's not an error. Preserve as much info as - // we can find. - errResp.Message = err.Error() - if coder, ok := cause.(errorCoder); ok { - errResp.Code = coder.ErrorCode() - } - if infoer, ok := cause.(errorInfoer); ok { - errResp.Info = infoer.ErrorInfo() - } - return &errResp -} - -// NewInteractionRequiredError returns an error of type *Error -// that requests an interaction from the client in response -// to the given request. The originalErr value describes the original -// error - if it is nil, a default message will be provided. 
-// -// This function should be used in preference to creating the Error value -// directly, as it sets the bakery protocol version correctly in the error. -// -// The returned error does not support any interaction kinds. -// Use kind-specific SetInteraction methods (for example -// WebBrowserInteractor.SetInteraction) to add supported -// interaction kinds. -// -// Note that WebBrowserInteractor.SetInteraction should always be called -// for legacy clients to maintain backwards compatibility. -func NewInteractionRequiredError(originalErr error, req *http.Request) *Error { - if originalErr == nil { - originalErr = ErrInteractionRequired - } - return &Error{ - Message: originalErr.Error(), - version: RequestVersion(req), - Code: ErrInteractionRequired, - } -} - -type DischargeRequiredErrorParams struct { - // Macaroon holds the macaroon that needs to be discharged - // by the client. - Macaroon *bakery.Macaroon - - // OriginalError holds the reason that the discharge-required - // error was created. If it's nil, ErrDischargeRequired will - // be used. - OriginalError error - - // CookiePath holds the path for the client to give the cookie - // holding the discharged macaroon. If it's empty, then a - // relative path from the request URL path to / will be used if - // Request is provided, or "/" otherwise. - CookiePath string - - // CookieNameSuffix holds the suffix for the client - // to give the cookie holding the discharged macaroon - // (after the "macaroon-" prefix). - // If it's empty, "auth" will be used. - CookieNameSuffix string - - // Request holds the request that the error is in response to. - // It is used to form the cookie path if CookiePath is empty. - Request *http.Request -} - -// NewDischargeRequiredErrorWithVersion returns an error of type *Error -// that contains a macaroon to the client and acts as a request that the -// macaroon be discharged to authorize the request. 
-// -// The client is responsible for discharging the macaroon and -// storing it as a cookie (or including it as a Macaroon header) -// to be used for the subsequent request. -func NewDischargeRequiredError(p DischargeRequiredErrorParams) error { - if p.OriginalError == nil { - p.OriginalError = ErrDischargeRequired - } - if p.CookiePath == "" { - p.CookiePath = "/" - if p.Request != nil { - path, err := httputil.RelativeURLPath(p.Request.URL.Path, "/") - if err == nil { - p.CookiePath = path - } - } - } - if p.CookieNameSuffix == "" { - p.CookieNameSuffix = "auth" - } - return &Error{ - version: p.Macaroon.Version(), - Message: p.OriginalError.Error(), - Code: ErrDischargeRequired, - Info: &ErrorInfo{ - Macaroon: p.Macaroon, - MacaroonPath: p.CookiePath, - CookieNameSuffix: p.CookieNameSuffix, - }, - } -} - -// BakeryProtocolHeader is the header that HTTP clients should set -// to determine the bakery protocol version. If it is 0 or missing, -// a discharge-required error response will be returned with HTTP status 407; -// if it is 1, the response will have status 401 with the WWW-Authenticate -// header set to "Macaroon". -const BakeryProtocolHeader = "Bakery-Protocol-Version" - -// RequestVersion determines the bakery protocol version from a client -// request. If the protocol cannot be determined, or is invalid, the -// original version of the protocol is used. If a later version is -// found, the latest known version is used, which is OK because versions -// are backwardly compatible. -// -// TODO as there are no known version 0 clients, default to version 1 -// instead. -func RequestVersion(req *http.Request) bakery.Version { - vs := req.Header.Get(BakeryProtocolHeader) - if vs == "" { - // No header - use backward compatibility mode. - return bakery.Version0 - } - x, err := strconv.Atoi(vs) - if err != nil || x < 0 { - // Badly formed header - use backward compatibility mode. 
- return bakery.Version0 - } - v := bakery.Version(x) - if v > bakery.LatestVersion { - // Later version than we know about - use the - // latest version that we can. - return bakery.LatestVersion - } - return v -} - -func isDischargeRequiredError(err error) bool { - respErr, ok := errgo.Cause(err).(*Error) - if !ok { - return false - } - return respErr.Code == ErrDischargeRequired -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/keyring.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/keyring.go deleted file mode 100644 index b22610bb..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/keyring.go +++ /dev/null @@ -1,113 +0,0 @@ -package httpbakery - -import ( - "context" - "net/http" - "net/url" - - "gopkg.in/errgo.v1" - "gopkg.in/httprequest.v1" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery" -) - -var _ bakery.ThirdPartyLocator = (*ThirdPartyLocator)(nil) - -// NewThirdPartyLocator returns a new third party -// locator that uses the given client to find -// information about third parties and -// uses the given cache as a backing. -// -// If cache is nil, a new cache will be created. -// -// If client is nil, http.DefaultClient will be used. -func NewThirdPartyLocator(client httprequest.Doer, cache *bakery.ThirdPartyStore) *ThirdPartyLocator { - if cache == nil { - cache = bakery.NewThirdPartyStore() - } - if client == nil { - client = http.DefaultClient - } - return &ThirdPartyLocator{ - client: client, - cache: cache, - } -} - -// AllowInsecureThirdPartyLocator holds whether ThirdPartyLocator allows -// insecure HTTP connections for fetching third party information. -// It is provided for testing purposes and should not be used -// in production code. -var AllowInsecureThirdPartyLocator = false - -// ThirdPartyLocator represents locator that can interrogate -// third party discharge services for information. By default it refuses -// to use insecure URLs. 
-type ThirdPartyLocator struct { - client httprequest.Doer - allowInsecure bool - cache *bakery.ThirdPartyStore -} - -// AllowInsecure allows insecure URLs. This can be useful -// for testing purposes. See also AllowInsecureThirdPartyLocator. -func (kr *ThirdPartyLocator) AllowInsecure() { - kr.allowInsecure = true -} - -// ThirdPartyLocator implements bakery.ThirdPartyLocator -// by first looking in the backing cache and, if that fails, -// making an HTTP request to find the information associated -// with the given discharge location. -// -// It refuses to fetch information from non-HTTPS URLs. -func (kr *ThirdPartyLocator) ThirdPartyInfo(ctx context.Context, loc string) (bakery.ThirdPartyInfo, error) { - // If the cache has an entry in, we can use it regardless of URL scheme. - // This allows entries for notionally insecure URLs to be added by other means (for - // example via a config file). - info, err := kr.cache.ThirdPartyInfo(ctx, loc) - if err == nil { - return info, nil - } - u, err := url.Parse(loc) - if err != nil { - return bakery.ThirdPartyInfo{}, errgo.Notef(err, "invalid discharge URL %q", loc) - } - if u.Scheme != "https" && !kr.allowInsecure && !AllowInsecureThirdPartyLocator { - return bakery.ThirdPartyInfo{}, errgo.Newf("untrusted discharge URL %q", loc) - } - info, err = ThirdPartyInfoForLocation(ctx, kr.client, loc) - if err != nil { - return bakery.ThirdPartyInfo{}, errgo.Mask(err) - } - kr.cache.AddInfo(loc, info) - return info, nil -} - -// ThirdPartyInfoForLocation returns information on the third party -// discharge server running at the given location URL. Note that this is -// insecure if an http: URL scheme is used. If client is nil, -// http.DefaultClient will be used. 
-func ThirdPartyInfoForLocation(ctx context.Context, client httprequest.Doer, url string) (bakery.ThirdPartyInfo, error) { - dclient := newDischargeClient(url, client) - info, err := dclient.DischargeInfo(ctx, &dischargeInfoRequest{}) - if err == nil { - return bakery.ThirdPartyInfo{ - PublicKey: *info.PublicKey, - Version: info.Version, - }, nil - } - derr, ok := errgo.Cause(err).(*httprequest.DecodeResponseError) - if !ok || derr.Response.StatusCode != http.StatusNotFound { - return bakery.ThirdPartyInfo{}, errgo.Mask(err) - } - // The new endpoint isn't there, so try the old one. - pkResp, err := dclient.PublicKey(ctx, &publicKeyRequest{}) - if err != nil { - return bakery.ThirdPartyInfo{}, errgo.Mask(err) - } - return bakery.ThirdPartyInfo{ - PublicKey: *pkResp.PublicKey, - Version: bakery.Version1, - }, nil -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/oven.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/oven.go deleted file mode 100644 index c301ad13..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/oven.go +++ /dev/null @@ -1,88 +0,0 @@ -package httpbakery - -import ( - "context" - "net/http" - "time" - - "gopkg.in/errgo.v1" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery" - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers" -) - -// Oven is like bakery.Oven except it provides a method for -// translating errors returned by bakery.AuthChecker into -// errors suitable for passing to WriteError. -type Oven struct { - // Oven holds the bakery Oven used to create - // new macaroons to put in discharge-required errors. - *bakery.Oven - - // AuthnExpiry holds the expiry time of macaroons that - // are created for authentication. As these are generally - // applicable to all endpoints in an API, this is usually - // longer than AuthzExpiry. If this is zero, DefaultAuthnExpiry - // will be used. 
- AuthnExpiry time.Duration - - // AuthzExpiry holds the expiry time of macaroons that are - // created for authorization. As these are generally applicable - // to specific operations, they generally don't need - // a long lifespan, so this is usually shorter than AuthnExpiry. - // If this is zero, DefaultAuthzExpiry will be used. - AuthzExpiry time.Duration -} - -// Default expiry times for macaroons created by Oven.Error. -const ( - DefaultAuthnExpiry = 7 * 24 * time.Hour - DefaultAuthzExpiry = 5 * time.Minute -) - -// Error processes an error as returned from bakery.AuthChecker -// into an error suitable for returning as a response to req -// with WriteError. -// -// Specifically, it translates bakery.ErrPermissionDenied into -// ErrPermissionDenied and bakery.DischargeRequiredError -// into an Error with an ErrDischargeRequired code, using -// oven.Oven to mint the macaroon in it. -func (oven *Oven) Error(ctx context.Context, req *http.Request, err error) error { - cause := errgo.Cause(err) - if cause == bakery.ErrPermissionDenied { - return errgo.WithCausef(err, ErrPermissionDenied, "") - } - derr, ok := cause.(*bakery.DischargeRequiredError) - if !ok { - return errgo.Mask(err) - } - // TODO it's possible to have more than two levels here - think - // about some naming scheme for the cookies that allows that. - expiryDuration := oven.AuthzExpiry - if expiryDuration == 0 { - expiryDuration = DefaultAuthzExpiry - } - cookieName := "authz" - if derr.ForAuthentication { - // Authentication macaroons are a bit different, so use - // a different cookie name so both can be presented together. - cookieName = "authn" - expiryDuration = oven.AuthnExpiry - if expiryDuration == 0 { - expiryDuration = DefaultAuthnExpiry - } - } - m, err := oven.Oven.NewMacaroon(ctx, RequestVersion(req), derr.Caveats, derr.Ops...) 
- if err != nil { - return errgo.Notef(err, "cannot mint new macaroon") - } - if err := m.AddCaveat(ctx, checkers.TimeBeforeCaveat(time.Now().Add(expiryDuration)), nil, nil); err != nil { - return errgo.Notef(err, "cannot add time-before caveat") - } - return NewDischargeRequiredError(DischargeRequiredErrorParams{ - Macaroon: m, - CookieNameSuffix: cookieName, - Request: req, - }) -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/request.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/request.go deleted file mode 100644 index 2f936d7c..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/request.go +++ /dev/null @@ -1,197 +0,0 @@ -package httpbakery - -import ( - "bytes" - "context" - "io" - "net/http" - "reflect" - "sync" - "sync/atomic" - - "gopkg.in/errgo.v1" -) - -// newRetrableRequest wraps an HTTP request so that it can -// be retried without incurring race conditions and reports -// whether the request can be retried. -// The client instance will be used to make the request -// when the do method is called. -// -// Because http.NewRequest often wraps its request bodies -// with ioutil.NopCloser, which hides whether the body is -// seekable, we extract the seeker from inside the nopCloser if -// possible. -// -// We also work around Go issue 12796 by preventing concurrent -// reads to the underlying reader after the request body has -// been closed by Client.Do. -// -// The returned value should be closed after use. 
-func newRetryableRequest(client *http.Client, req *http.Request) (*retryableRequest, bool) { - if req.Body == nil { - return &retryableRequest{ - client: client, - ref: 1, - req: req, - origCookie: req.Header.Get("Cookie"), - }, true - } - body := seekerFromBody(req.Body) - if body == nil { - return nil, false - } - return &retryableRequest{ - client: client, - ref: 1, - req: req, - body: body, - origCookie: req.Header.Get("Cookie"), - }, true -} - -type retryableRequest struct { - client *http.Client - ref int32 - origCookie string - body readSeekCloser - readStopper *readStopper - req *http.Request -} - -// do performs the HTTP request. -func (rreq *retryableRequest) do(ctx context.Context) (*http.Response, error) { - req, err := rreq.prepare() - if err != nil { - return nil, errgo.Mask(err) - } - return rreq.client.Do(req.WithContext(ctx)) -} - -// prepare returns a new HTTP request object -// by copying the original request and seeking -// back to the start of the original body if needed. -// -// It needs to make a copy of the request because -// the HTTP code can access the Request.Body field -// after Client.Do has returned, which means we can't -// replace it for the second request. -func (rreq *retryableRequest) prepare() (*http.Request, error) { - req := new(http.Request) - *req = *rreq.req - // Make sure that the original cookie header is still in place - // so that we only end up with the cookies that are actually - // added by the HTTP cookie logic, and not the ones that were - // added in previous requests too. - req.Header.Set("Cookie", rreq.origCookie) - if rreq.body == nil { - // No need for any of the seek shenanigans. - return req, nil - } - if rreq.readStopper != nil { - // We've made a previous request. Close its request - // body so it can't interfere with the new request's body - // and then seek back to the start. 
- rreq.readStopper.Close() - if _, err := rreq.body.Seek(0, 0); err != nil { - return nil, errgo.Notef(err, "cannot seek to start of request body") - } - } - atomic.AddInt32(&rreq.ref, 1) - // Replace the request body with a new readStopper so that - // we can stop a second request from interfering with current - // request's body. - rreq.readStopper = &readStopper{ - rreq: rreq, - r: rreq.body, - } - req.Body = rreq.readStopper - return req, nil -} - -// close closes the request. It closes the underlying reader -// when all references have gone. -func (req *retryableRequest) close() error { - if atomic.AddInt32(&req.ref, -1) == 0 && req.body != nil { - // We've closed it for the last time, so actually close - // the original body. - return req.body.Close() - } - return nil -} - -// readStopper works around an issue with the net/http -// package (see http://golang.org/issue/12796). -// Because the first HTTP request might not have finished -// reading from its body when it returns, we need to -// ensure that the second request does not race on Read, -// so this type implements a Reader that prevents all Read -// calls to the underlying Reader after Close has been called. -type readStopper struct { - rreq *retryableRequest - mu sync.Mutex - r io.ReadSeeker -} - -func (r *readStopper) Read(buf []byte) (int, error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.r == nil { - // Note: we have to use io.EOF here because otherwise - // another connection can in rare circumstances be - // polluted by the error returned here. Although this - // means the file may appear truncated to the server, - // that shouldn't matter because the body will only - // be closed after the server has replied. 
- return 0, io.EOF - } - return r.r.Read(buf) -} - -func (r *readStopper) Close() error { - r.mu.Lock() - alreadyClosed := r.r == nil - r.r = nil - r.mu.Unlock() - if alreadyClosed { - return nil - } - return r.rreq.close() -} - -var nopCloserType = reflect.TypeOf(io.NopCloser(nil)) -var nopCloserWriterToType = reflect.TypeOf(io.NopCloser(bytes.NewReader([]byte{}))) - -type readSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// seekerFromBody tries to obtain a seekable reader -// from the given request body. -func seekerFromBody(r io.ReadCloser) readSeekCloser { - if r, ok := r.(readSeekCloser); ok { - return r - } - rv := reflect.ValueOf(r) - if rv.Type() != nopCloserType && rv.Type() != nopCloserWriterToType { - return nil - } - // It's a value created by nopCloser. Extract the - // underlying Reader. Note that this works - // because the ioutil.nopCloser type exports - // its Reader field. - rs, ok := rv.Field(0).Interface().(io.ReadSeeker) - if !ok { - return nil - } - return readSeekerWithNopClose{rs} -} - -type readSeekerWithNopClose struct { - io.ReadSeeker -} - -func (r readSeekerWithNopClose) Close() error { - return nil -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/visitor.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/visitor.go deleted file mode 100644 index 047ebbad..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/visitor.go +++ /dev/null @@ -1,68 +0,0 @@ -package httpbakery - -import ( - "context" - "net/http" - "net/url" - - "gopkg.in/errgo.v1" - "gopkg.in/httprequest.v1" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery" -) - -// TODO(rog) rename this file. - -// legacyGetInteractionMethods queries a URL as found in an -// ErrInteractionRequired VisitURL field to find available interaction -// methods. 
-// -// It does this by sending a GET request to the URL with the Accept -// header set to "application/json" and parsing the resulting -// response as a map[string]string. -// -// It uses the given Doer to execute the HTTP GET request. -func legacyGetInteractionMethods(ctx context.Context, logger bakery.Logger, client httprequest.Doer, u *url.URL) map[string]*url.URL { - methodURLs, err := legacyGetInteractionMethods1(ctx, client, u) - if err != nil { - // When a discharger doesn't support retrieving interaction methods, - // we expect to get an error, because it's probably returning an HTML - // page not JSON. - if logger != nil { - logger.Debugf(ctx, "ignoring error: cannot get interaction methods: %v; %s", err, errgo.Details(err)) - } - methodURLs = make(map[string]*url.URL) - } - if methodURLs["interactive"] == nil { - // There's no "interactive" method returned, but we know - // the server does actually support it, because all dischargers - // are required to, so fill it in with the original URL. - methodURLs["interactive"] = u - } - return methodURLs -} - -func legacyGetInteractionMethods1(ctx context.Context, client httprequest.Doer, u *url.URL) (map[string]*url.URL, error) { - httpReqClient := &httprequest.Client{ - Doer: client, - } - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, errgo.Notef(err, "cannot create request") - } - req.Header.Set("Accept", "application/json") - var methodURLStrs map[string]string - if err := httpReqClient.Do(ctx, req, &methodURLStrs); err != nil { - return nil, errgo.Mask(err) - } - // Make all the URLs relative to the request URL. 
- methodURLs := make(map[string]*url.URL) - for m, urlStr := range methodURLStrs { - relURL, err := url.Parse(urlStr) - if err != nil { - return nil, errgo.Notef(err, "invalid URL for interaction method %q", m) - } - methodURLs[m] = u.ResolveReference(relURL) - } - return methodURLs, nil -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil/relativeurl.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil/relativeurl.go deleted file mode 100644 index a9431fa6..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil/relativeurl.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -// Note: this code was copied from github.com/juju/utils. - -// Package httputil holds utility functions related to net/http. -package httputil - -import ( - "errors" - "strings" -) - -// RelativeURLPath returns a relative URL path that is lexically -// equivalent to targpath when interpreted by url.URL.ResolveReference. -// On success, the returned path will always be non-empty and relative -// to basePath, even if basePath and targPath share no elements. -// -// It is assumed that both basePath and targPath are normalized -// (have no . or .. elements). -// -// An error is returned if basePath or targPath are not absolute paths. -func RelativeURLPath(basePath, targPath string) (string, error) { - if !strings.HasPrefix(basePath, "/") { - return "", errors.New("non-absolute base URL") - } - if !strings.HasPrefix(targPath, "/") { - return "", errors.New("non-absolute target URL") - } - baseParts := strings.Split(basePath, "/") - targParts := strings.Split(targPath, "/") - - // For the purposes of dotdot, the last element of - // the paths are irrelevant. We save the last part - // of the target path for later. 
- lastElem := targParts[len(targParts)-1] - baseParts = baseParts[0 : len(baseParts)-1] - targParts = targParts[0 : len(targParts)-1] - - // Find the common prefix between the two paths: - var i int - for ; i < len(baseParts); i++ { - if i >= len(targParts) || baseParts[i] != targParts[i] { - break - } - } - dotdotCount := len(baseParts) - i - targOnly := targParts[i:] - result := make([]string, 0, dotdotCount+len(targOnly)+1) - for i := 0; i < dotdotCount; i++ { - result = append(result, "..") - } - result = append(result, targOnly...) - result = append(result, lastElem) - final := strings.Join(result, "/") - if final == "" { - // If the final result is empty, the last element must - // have been empty, so the target was slash terminated - // and there were no previous elements, so "." - // is appropriate. - final = "." - } - return final, nil -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE b/vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE deleted file mode 100644 index 67c4fb56..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE +++ /dev/null @@ -1,187 +0,0 @@ -Copyright © 2014, Roger Peppe, Canonical Inc. - -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. 
If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. 
- - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. 
- - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. 
(If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. 
- - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/README.md b/vendor/github.com/go-macaroon-bakery/macaroonpb/README.md deleted file mode 100644 index 4d03b8a8..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroonpb/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Macaroon ID Protocol Buffers - -This module defines the serialization format of macaroon identifiers for -macaroons created by the macaroon-bakery. For the most part this encoding -is considered an internal implementation detail of the macaroon-bakery -and external applications should not rely on any of the details of this -encoding being maintained between different bakery versions. - -This is broken out into a separate module as the protobuf implementation -works in such a way that one cannot have multiple definitions of a -message in any particular application's dependency tree. This module -therefore provides a common definition for use by multiple versions of -the macaroon-bakery to facilitate easier migration in client applications. diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.go b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.go deleted file mode 100644 index f7ddc18b..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.go +++ /dev/null @@ -1,19 +0,0 @@ -// Package macaroonpb defines the serialization details of macaroon ids -// used in the macaroon-bakery. -package macaroonpb - -import ( - "github.com/golang/protobuf/proto" -) - -//go:generate protoc --go_out . id.proto - -// MarshalBinary implements encoding.BinaryMarshal. 
-func (id *MacaroonId) MarshalBinary() ([]byte, error) { - return proto.Marshal(id) -} - -// UnmarshalBinary implements encoding.UnmarshalBinary. -func (id *MacaroonId) UnmarshalBinary(data []byte) error { - return proto.Unmarshal(data, id) -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go deleted file mode 100644 index 41b69d9d..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go +++ /dev/null @@ -1,238 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.23.0 -// protoc v3.12.3 -// source: id.proto - -package macaroonpb - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - -type MacaroonId struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Nonce []byte `protobuf:"bytes,1,opt,name=nonce,proto3" json:"nonce,omitempty"` - StorageId []byte `protobuf:"bytes,2,opt,name=storageId,proto3" json:"storageId,omitempty"` - Ops []*Op `protobuf:"bytes,3,rep,name=ops,proto3" json:"ops,omitempty"` -} - -func (x *MacaroonId) Reset() { - *x = MacaroonId{} - if protoimpl.UnsafeEnabled { - mi := &file_id_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MacaroonId) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MacaroonId) ProtoMessage() {} - -func (x *MacaroonId) ProtoReflect() protoreflect.Message { - mi := &file_id_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MacaroonId.ProtoReflect.Descriptor instead. 
-func (*MacaroonId) Descriptor() ([]byte, []int) { - return file_id_proto_rawDescGZIP(), []int{0} -} - -func (x *MacaroonId) GetNonce() []byte { - if x != nil { - return x.Nonce - } - return nil -} - -func (x *MacaroonId) GetStorageId() []byte { - if x != nil { - return x.StorageId - } - return nil -} - -func (x *MacaroonId) GetOps() []*Op { - if x != nil { - return x.Ops - } - return nil -} - -type Op struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Entity string `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"` - Actions []string `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"` -} - -func (x *Op) Reset() { - *x = Op{} - if protoimpl.UnsafeEnabled { - mi := &file_id_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Op) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Op) ProtoMessage() {} - -func (x *Op) ProtoReflect() protoreflect.Message { - mi := &file_id_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Op.ProtoReflect.Descriptor instead. 
-func (*Op) Descriptor() ([]byte, []int) { - return file_id_proto_rawDescGZIP(), []int{1} -} - -func (x *Op) GetEntity() string { - if x != nil { - return x.Entity - } - return "" -} - -func (x *Op) GetActions() []string { - if x != nil { - return x.Actions - } - return nil -} - -var File_id_proto protoreflect.FileDescriptor - -var file_id_proto_rawDesc = []byte{ - 0x0a, 0x08, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, 0x0a, 0x4d, 0x61, - 0x63, 0x61, 0x72, 0x6f, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x1c, - 0x0a, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x15, 0x0a, 0x03, - 0x6f, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x03, 0x2e, 0x4f, 0x70, 0x52, 0x03, - 0x6f, 0x70, 0x73, 0x22, 0x36, 0x0a, 0x02, 0x4f, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0e, 0x5a, 0x0c, 0x2e, - 0x3b, 0x6d, 0x61, 0x63, 0x61, 0x72, 0x6f, 0x6f, 0x6e, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_id_proto_rawDescOnce sync.Once - file_id_proto_rawDescData = file_id_proto_rawDesc -) - -func file_id_proto_rawDescGZIP() []byte { - file_id_proto_rawDescOnce.Do(func() { - file_id_proto_rawDescData = protoimpl.X.CompressGZIP(file_id_proto_rawDescData) - }) - return file_id_proto_rawDescData -} - -var file_id_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_id_proto_goTypes = []interface{}{ - (*MacaroonId)(nil), // 0: MacaroonId - (*Op)(nil), // 1: Op -} -var file_id_proto_depIdxs = []int32{ - 1, // 
0: MacaroonId.ops:type_name -> Op - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_id_proto_init() } -func file_id_proto_init() { - if File_id_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_id_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MacaroonId); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_id_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Op); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_id_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_id_proto_goTypes, - DependencyIndexes: file_id_proto_depIdxs, - MessageInfos: file_id_proto_msgTypes, - }.Build() - File_id_proto = out.File - file_id_proto_rawDesc = nil - file_id_proto_goTypes = nil - file_id_proto_depIdxs = nil -} diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto deleted file mode 100644 index bfe891ee..00000000 --- a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax="proto3"; - -option go_package = ".;macaroonpb"; - -message MacaroonId { - bytes nonce = 1; - bytes storageId = 2; - repeated Op ops = 3; -} - -message Op { - string entity = 1; - repeated string actions = 2; -} diff --git a/vendor/github.com/juju/webbrowser/.gitignore 
b/vendor/github.com/juju/webbrowser/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/vendor/github.com/juju/webbrowser/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/juju/webbrowser/LICENSE b/vendor/github.com/juju/webbrowser/LICENSE deleted file mode 100644 index 65c5ca88..00000000 --- a/vendor/github.com/juju/webbrowser/LICENSE +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". 
- - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. 
You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. 
A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. 
- - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/vendor/github.com/juju/webbrowser/README.md b/vendor/github.com/juju/webbrowser/README.md deleted file mode 100644 index a666d3fd..00000000 --- a/vendor/github.com/juju/webbrowser/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# webbrowser -Go helpers for interacting with Web browsers. diff --git a/vendor/github.com/juju/webbrowser/webbrowser.go b/vendor/github.com/juju/webbrowser/webbrowser.go deleted file mode 100644 index 0a19c413..00000000 --- a/vendor/github.com/juju/webbrowser/webbrowser.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package webbrowser - -import ( - "errors" - "net/url" - "os" - "os/exec" - "runtime" - "strings" -) - -// Open opens a web browser at the given URL. -// If the OS is not recognized, an ErrNoBrowser is returned. -func Open(url *url.URL) error { - var args []string - if runtime.GOOS == "windows" { - // Windows is special because the start command is built into cmd.exe - // and hence requires the argument to be quoted. 
- args = []string{"cmd", "/c", "start", winCmdQuote.Replace(url.String())} - } else if b := browser[runtime.GOOS]; b != "" { - args = []string{b, url.String()} - } else { - return ErrNoBrowser - } - cmd := exec.Command(args[0], args[1:]...) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Start() - go cmd.Wait() - return nil -} - -// ErrNoBrowser is returned when a browser cannot be found for the current OS. -var ErrNoBrowser = errors.New("cannot find a browser to open the web page") - -var browser = map[string]string{ - "darwin": "open", - "freebsd": "xdg-open", - "linux": "xdg-open", - "netbsd": "xdg-open", - "openbsd": "xdg-open", -} - -// winCmdQuote can quote metacharacters special to the Windows -// cmd.exe command interpreter. It does that by inserting -// a '^' character before each metacharacter. Note that -// most of these cannot actually be produced by URL.String, -// but we include them for completeness. -var winCmdQuote = strings.NewReplacer( - "&", "^&", - "%", "^%", - "(", "^(", - ")", "^)", - "^", "^^", - "<", "^<", - ">", "^>", - "|", "^|", -) diff --git a/vendor/github.com/julienschmidt/httprouter/.travis.yml b/vendor/github.com/julienschmidt/httprouter/.travis.yml deleted file mode 100644 index ffacfb5d..00000000 --- a/vendor/github.com/julienschmidt/httprouter/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -sudo: false -language: go -go: - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - 1.13.x - - master -before_install: - - go get github.com/mattn/goveralls -script: - - go test -v -covermode=count -coverprofile=coverage.out - - go vet ./... - - test -z "$(gofmt -d -s . 
| tee /dev/stderr)" - - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci diff --git a/vendor/github.com/julienschmidt/httprouter/LICENSE b/vendor/github.com/julienschmidt/httprouter/LICENSE deleted file mode 100644 index 875308f5..00000000 --- a/vendor/github.com/julienschmidt/httprouter/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -BSD 3-Clause License - -Copyright (c) 2013, Julien Schmidt -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/julienschmidt/httprouter/README.md b/vendor/github.com/julienschmidt/httprouter/README.md deleted file mode 100644 index d1e68b5e..00000000 --- a/vendor/github.com/julienschmidt/httprouter/README.md +++ /dev/null @@ -1,300 +0,0 @@ -# HttpRouter [![Build Status](https://travis-ci.org/julienschmidt/httprouter.svg?branch=master)](https://travis-ci.org/julienschmidt/httprouter) [![Coverage Status](https://coveralls.io/repos/github/julienschmidt/httprouter/badge.svg?branch=master)](https://coveralls.io/github/julienschmidt/httprouter?branch=master) [![GoDoc](https://godoc.org/github.com/julienschmidt/httprouter?status.svg)](http://godoc.org/github.com/julienschmidt/httprouter) - -HttpRouter is a lightweight high performance HTTP request router (also called *multiplexer* or just *mux* for short) for [Go](https://golang.org/). - -In contrast to the [default mux](https://golang.org/pkg/net/http/#ServeMux) of Go's `net/http` package, this router supports variables in the routing pattern and matches against the request method. It also scales better. - -The router is optimized for high performance and a small memory footprint. It scales well even with very long paths and a large number of routes. A compressing dynamic trie (radix tree) structure is used for efficient matching. - -## Features - -**Only explicit matches:** With other routers, like [`http.ServeMux`](https://golang.org/pkg/net/http/#ServeMux), a requested URL path could match multiple patterns. Therefore they have some awkward pattern priority rules, like *longest match* or *first registered, first matched*. By design of this router, a request can only match exactly one or no route. As a result, there are also no unintended matches, which makes it great for SEO and improves the user experience. - -**Stop caring about trailing slashes:** Choose the URL style you like, the router automatically redirects the client if a trailing slash is missing or if there is one extra. 
Of course it only does so, if the new path has a handler. If you don't like it, you can [turn off this behavior](https://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash). - -**Path auto-correction:** Besides detecting the missing or additional trailing slash at no extra cost, the router can also fix wrong cases and remove superfluous path elements (like `../` or `//`). Is [CAPTAIN CAPS LOCK](http://www.urbandictionary.com/define.php?term=Captain+Caps+Lock) one of your users? HttpRouter can help him by making a case-insensitive look-up and redirecting him to the correct URL. - -**Parameters in your routing pattern:** Stop parsing the requested URL path, just give the path segment a name and the router delivers the dynamic value to you. Because of the design of the router, path parameters are very cheap. - -**Zero Garbage:** The matching and dispatching process generates zero bytes of garbage. The only heap allocations that are made are building the slice of the key-value pairs for path parameters, and building new context and request objects (the latter only in the standard `Handler`/`HandlerFunc` API). In the 3-argument API, if the request path contains no parameters not a single heap allocation is necessary. - -**Best Performance:** [Benchmarks speak for themselves](https://github.com/julienschmidt/go-http-routing-benchmark). See below for technical details of the implementation. - -**No more server crashes:** You can set a [Panic handler](https://godoc.org/github.com/julienschmidt/httprouter#Router.PanicHandler) to deal with panics occurring during handling a HTTP request. The router then recovers and lets the `PanicHandler` log what happened and deliver a nice error page. - -**Perfect for APIs:** The router design encourages to build sensible, hierarchical RESTful APIs. Moreover it has built-in native support for [OPTIONS requests](http://zacstewart.com/2012/04/14/http-options-method.html) and `405 Method Not Allowed` replies. 
- -Of course you can also set **custom [`NotFound`](https://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) and [`MethodNotAllowed`](https://godoc.org/github.com/julienschmidt/httprouter#Router.MethodNotAllowed) handlers** and [**serve static files**](https://godoc.org/github.com/julienschmidt/httprouter#Router.ServeFiles). - -## Usage - -This is just a quick introduction, view the [GoDoc](http://godoc.org/github.com/julienschmidt/httprouter) for details. - -Let's start with a trivial example: - -```go -package main - -import ( - "fmt" - "net/http" - "log" - - "github.com/julienschmidt/httprouter" -) - -func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { - fmt.Fprint(w, "Welcome!\n") -} - -func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { - fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name")) -} - -func main() { - router := httprouter.New() - router.GET("/", Index) - router.GET("/hello/:name", Hello) - - log.Fatal(http.ListenAndServe(":8080", router)) -} -``` - -### Named parameters - -As you can see, `:name` is a *named parameter*. The values are accessible via `httprouter.Params`, which is just a slice of `httprouter.Param`s. You can get the value of a parameter either by its index in the slice, or by using the `ByName(name)` method: `:name` can be retrieved by `ByName("name")`. - -When using a `http.Handler` (using `router.Handler` or `http.HandlerFunc`) instead of HttpRouter's handle API using a 3rd function parameter, the named parameters are stored in the `request.Context`. See more below under [Why doesn't this work with http.Handler?](#why-doesnt-this-work-with-httphandler). - -Named parameters only match a single path segment: - -``` -Pattern: /user/:user - - /user/gordon match - /user/you match - /user/gordon/profile no match - /user/ no match -``` - -**Note:** Since this router has only explicit matches, you can not register static routes and parameters for the same path segment. 
For example you can not register the patterns `/user/new` and `/user/:user` for the same request method at the same time. The routing of different request methods is independent from each other. - -### Catch-All parameters - -The second type are *catch-all* parameters and have the form `*name`. Like the name suggests, they match everything. Therefore they must always be at the **end** of the pattern: - -``` -Pattern: /src/*filepath - - /src/ match - /src/somefile.go match - /src/subdir/somefile.go match -``` - -## How does it work? - -The router relies on a tree structure which makes heavy use of *common prefixes*, it is basically a *compact* [*prefix tree*](https://en.wikipedia.org/wiki/Trie) (or just [*Radix tree*](https://en.wikipedia.org/wiki/Radix_tree)). Nodes with a common prefix also share a common parent. Here is a short example what the routing tree for the `GET` request method could look like: - -``` -Priority Path Handle -9 \ *<1> -3 ├s nil -2 |├earch\ *<2> -1 |└upport\ *<3> -2 ├blog\ *<4> -1 | └:post nil -1 | └\ *<5> -2 ├about-us\ *<6> -1 | └team\ *<7> -1 └contact\ *<8> -``` - -Every `*` represents the memory address of a handler function (a pointer). If you follow a path trough the tree from the root to the leaf, you get the complete route path, e.g `\blog\:post\`, where `:post` is just a placeholder ([*parameter*](#named-parameters)) for an actual post name. Unlike hash-maps, a tree structure also allows us to use dynamic parts like the `:post` parameter, since we actually match against the routing patterns instead of just comparing hashes. [As benchmarks show](https://github.com/julienschmidt/go-http-routing-benchmark), this works very well and efficient. - -Since URL paths have a hierarchical structure and make use only of a limited set of characters (byte values), it is very likely that there are a lot of common prefixes. This allows us to easily reduce the routing into ever smaller problems. 
Moreover the router manages a separate tree for every request method. For one thing it is more space efficient than holding a method->handle map in every single node, it also allows us to greatly reduce the routing problem before even starting the look-up in the prefix-tree. - -For even better scalability, the child nodes on each tree level are ordered by priority, where the priority is just the number of handles registered in sub nodes (children, grandchildren, and so on..). This helps in two ways: - -1. Nodes which are part of the most routing paths are evaluated first. This helps to make as much routes as possible to be reachable as fast as possible. -2. It is some sort of cost compensation. The longest reachable path (highest cost) can always be evaluated first. The following scheme visualizes the tree structure. Nodes are evaluated from top to bottom and from left to right. - -``` -├------------ -├--------- -├----- -├---- -├-- -├-- -└- -``` - -## Why doesn't this work with `http.Handler`? - -**It does!** The router itself implements the `http.Handler` interface. Moreover the router provides convenient [adapters for `http.Handler`](https://godoc.org/github.com/julienschmidt/httprouter#Router.Handler)s and [`http.HandlerFunc`](https://godoc.org/github.com/julienschmidt/httprouter#Router.HandlerFunc)s which allows them to be used as a [`httprouter.Handle`](https://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) when registering a route. - -Named parameters can be accessed `request.Context`: - -```go -func Hello(w http.ResponseWriter, r *http.Request) { - params := httprouter.ParamsFromContext(r.Context()) - - fmt.Fprintf(w, "hello, %s!\n", params.ByName("name")) -} -``` - -Alternatively, one can also use `params := r.Context().Value(httprouter.ParamsKey)` instead of the helper function. - -Just try it out for yourself, the usage of HttpRouter is very straightforward. 
The package is compact and minimalistic, but also probably one of the easiest routers to set up. - -## Automatic OPTIONS responses and CORS - -One might wish to modify automatic responses to OPTIONS requests, e.g. to support [CORS preflight requests](https://developer.mozilla.org/en-US/docs/Glossary/preflight_request) or to set other headers. -This can be achieved using the [`Router.GlobalOPTIONS`](https://godoc.org/github.com/julienschmidt/httprouter#Router.GlobalOPTIONS) handler: - -```go -router.GlobalOPTIONS = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Header.Get("Access-Control-Request-Method") != "" { - // Set CORS headers - header := w.Header() - header.Set("Access-Control-Allow-Methods", r.Header.Get("Allow")) - header.Set("Access-Control-Allow-Origin", "*") - } - - // Adjust status code to 204 - w.WriteHeader(http.StatusNoContent) -}) -``` - -## Where can I find Middleware *X*? - -This package just provides a very efficient request router with a few extra features. The router is just a [`http.Handler`](https://golang.org/pkg/net/http/#Handler), you can chain any http.Handler compatible middleware before the router, for example the [Gorilla handlers](http://www.gorillatoolkit.org/pkg/handlers). Or you could [just write your own](https://justinas.org/writing-http-middleware-in-go/), it's very easy! - -Alternatively, you could try [a web framework based on HttpRouter](#web-frameworks-based-on-httprouter). - -### Multi-domain / Sub-domains - -Here is a quick example: Does your server serve multiple domains / hosts? -You want to use sub-domains? -Define a router per host! - -```go -// We need an object that implements the http.Handler interface. -// Therefore we need a type for which we implement the ServeHTTP method. 
-// We just use a map here, in which we map host names (with port) to http.Handlers -type HostSwitch map[string]http.Handler - -// Implement the ServeHTTP method on our new type -func (hs HostSwitch) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // Check if a http.Handler is registered for the given host. - // If yes, use it to handle the request. - if handler := hs[r.Host]; handler != nil { - handler.ServeHTTP(w, r) - } else { - // Handle host names for which no handler is registered - http.Error(w, "Forbidden", 403) // Or Redirect? - } -} - -func main() { - // Initialize a router as usual - router := httprouter.New() - router.GET("/", Index) - router.GET("/hello/:name", Hello) - - // Make a new HostSwitch and insert the router (our http handler) - // for example.com and port 12345 - hs := make(HostSwitch) - hs["example.com:12345"] = router - - // Use the HostSwitch to listen and serve on port 12345 - log.Fatal(http.ListenAndServe(":12345", hs)) -} -``` - -### Basic Authentication - -Another quick example: Basic Authentication (RFC 2617) for handles: - -```go -package main - -import ( - "fmt" - "log" - "net/http" - - "github.com/julienschmidt/httprouter" -) - -func BasicAuth(h httprouter.Handle, requiredUser, requiredPassword string) httprouter.Handle { - return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { - // Get the Basic Authentication credentials - user, password, hasAuth := r.BasicAuth() - - if hasAuth && user == requiredUser && password == requiredPassword { - // Delegate request to the given handle - h(w, r, ps) - } else { - // Request Basic Authentication otherwise - w.Header().Set("WWW-Authenticate", "Basic realm=Restricted") - http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) - } - } -} - -func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { - fmt.Fprint(w, "Not protected!\n") -} - -func Protected(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { - 
fmt.Fprint(w, "Protected!\n") -} - -func main() { - user := "gordon" - pass := "secret!" - - router := httprouter.New() - router.GET("/", Index) - router.GET("/protected/", BasicAuth(Protected, user, pass)) - - log.Fatal(http.ListenAndServe(":8080", router)) -} -``` - -## Chaining with the NotFound handler - -**NOTE: It might be required to set [`Router.HandleMethodNotAllowed`](https://godoc.org/github.com/julienschmidt/httprouter#Router.HandleMethodNotAllowed) to `false` to avoid problems.** - -You can use another [`http.Handler`](https://golang.org/pkg/net/http/#Handler), for example another router, to handle requests which could not be matched by this router by using the [`Router.NotFound`](https://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) handler. This allows chaining. - -### Static files - -The `NotFound` handler can for example be used to serve static files from the root path `/` (like an `index.html` file along with other assets): - -```go -// Serve static files from the ./public directory -router.NotFound = http.FileServer(http.Dir("public")) -``` - -But this approach sidesteps the strict core rules of this router to avoid routing problems. A cleaner approach is to use a distinct sub-path for serving files, like `/static/*filepath` or `/files/*filepath`. 
- -## Web Frameworks based on HttpRouter - -If the HttpRouter is a bit too minimalistic for you, you might try one of the following more high-level 3rd-party web frameworks building upon the HttpRouter package: - -* [Ace](https://github.com/plimble/ace): Blazing fast Go Web Framework -* [api2go](https://github.com/manyminds/api2go): A JSON API Implementation for Go -* [Gin](https://github.com/gin-gonic/gin): Features a martini-like API with much better performance -* [Goat](https://github.com/bahlo/goat): A minimalistic REST API server in Go -* [goMiddlewareChain](https://github.com/TobiEiss/goMiddlewareChain): An express.js-like-middleware-chain -* [Hikaru](https://github.com/najeira/hikaru): Supports standalone and Google AppEngine -* [Hitch](https://github.com/nbio/hitch): Hitch ties httprouter, [httpcontext](https://github.com/nbio/httpcontext), and middleware up in a bow -* [httpway](https://github.com/corneldamian/httpway): Simple middleware extension with context for httprouter and a server with gracefully shutdown support -* [kami](https://github.com/guregu/kami): A tiny web framework using x/net/context -* [Medeina](https://github.com/imdario/medeina): Inspired by Ruby's Roda and Cuba -* [Neko](https://github.com/rocwong/neko): A lightweight web application framework for Golang -* [pbgo](https://github.com/chai2010/pbgo): pbgo is a mini RPC/REST framework based on Protobuf -* [River](https://github.com/abiosoft/river): River is a simple and lightweight REST server -* [siesta](https://github.com/VividCortex/siesta): Composable HTTP handlers with contexts -* [xmux](https://github.com/rs/xmux): xmux is a httprouter fork on top of xhandler (net/context aware) diff --git a/vendor/github.com/julienschmidt/httprouter/path.go b/vendor/github.com/julienschmidt/httprouter/path.go deleted file mode 100644 index 0331c7ec..00000000 --- a/vendor/github.com/julienschmidt/httprouter/path.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2013 Julien Schmidt. 
All rights reserved. -// Based on the path package, Copyright 2009 The Go Authors. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package httprouter - -// CleanPath is the URL version of path.Clean, it returns a canonical URL path -// for p, eliminating . and .. elements. -// -// The following rules are applied iteratively until no further processing can -// be done: -// 1. Replace multiple slashes with a single slash. -// 2. Eliminate each . path name element (the current directory). -// 3. Eliminate each inner .. path name element (the parent directory) -// along with the non-.. element that precedes it. -// 4. Eliminate .. elements that begin a rooted path: -// that is, replace "/.." by "/" at the beginning of a path. -// -// If the result of this process is an empty string, "/" is returned -func CleanPath(p string) string { - // Turn empty string into "/" - if p == "" { - return "/" - } - - n := len(p) - var buf []byte - - // Invariants: - // reading from path; r is index of next byte to process. - // writing to buf; w is index of next byte to write. - - // path must start with '/' - r := 1 - w := 1 - - if p[0] != '/' { - r = 0 - buf = make([]byte, n+1) - buf[0] = '/' - } - - trailing := n > 1 && p[n-1] == '/' - - // A bit more clunky without a 'lazybuf' like the path package, but the loop - // gets completely inlined (bufApp). So in contrast to the path package this - // loop has no expensive function calls (except 1x make) - - for r < n { - switch { - case p[r] == '/': - // empty path element, trailing slash is added after the end - r++ - - case p[r] == '.' && r+1 == n: - trailing = true - r++ - - case p[r] == '.' && p[r+1] == '/': - // . element - r += 2 - - case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'): - // .. 
element: remove to last / - r += 3 - - if w > 1 { - // can backtrack - w-- - - if buf == nil { - for w > 1 && p[w] != '/' { - w-- - } - } else { - for w > 1 && buf[w] != '/' { - w-- - } - } - } - - default: - // real path element. - // add slash if needed - if w > 1 { - bufApp(&buf, p, w, '/') - w++ - } - - // copy element - for r < n && p[r] != '/' { - bufApp(&buf, p, w, p[r]) - w++ - r++ - } - } - } - - // re-append trailing slash - if trailing && w > 1 { - bufApp(&buf, p, w, '/') - w++ - } - - if buf == nil { - return p[:w] - } - return string(buf[:w]) -} - -// internal helper to lazily create a buffer if necessary -func bufApp(buf *[]byte, s string, w int, c byte) { - if *buf == nil { - if s[w] == c { - return - } - - *buf = make([]byte, len(s)) - copy(*buf, s[:w]) - } - (*buf)[w] = c -} diff --git a/vendor/github.com/julienschmidt/httprouter/router.go b/vendor/github.com/julienschmidt/httprouter/router.go deleted file mode 100644 index 599529d1..00000000 --- a/vendor/github.com/julienschmidt/httprouter/router.go +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -// Package httprouter is a trie based high performance HTTP request router. -// -// A trivial example is: -// -// package main -// -// import ( -// "fmt" -// "github.com/julienschmidt/httprouter" -// "net/http" -// "log" -// ) -// -// func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { -// fmt.Fprint(w, "Welcome!\n") -// } -// -// func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { -// fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name")) -// } -// -// func main() { -// router := httprouter.New() -// router.GET("/", Index) -// router.GET("/hello/:name", Hello) -// -// log.Fatal(http.ListenAndServe(":8080", router)) -// } -// -// The router matches incoming requests by the request method and the path. 
-// If a handle is registered for this path and method, the router delegates the -// request to that function. -// For the methods GET, POST, PUT, PATCH and DELETE shortcut functions exist to -// register handles, for all other methods router.Handle can be used. -// -// The registered path, against which the router matches incoming requests, can -// contain two types of parameters: -// Syntax Type -// :name named parameter -// *name catch-all parameter -// -// Named parameters are dynamic path segments. They match anything until the -// next '/' or the path end: -// Path: /blog/:category/:post -// -// Requests: -// /blog/go/request-routers match: category="go", post="request-routers" -// /blog/go/request-routers/ no match, but the router would redirect -// /blog/go/ no match -// /blog/go/request-routers/comments no match -// -// Catch-all parameters match anything until the path end, including the -// directory index (the '/' before the catch-all). Since they match anything -// until the end, catch-all parameters must always be the final path element. -// Path: /files/*filepath -// -// Requests: -// /files/ match: filepath="/" -// /files/LICENSE match: filepath="/LICENSE" -// /files/templates/article.html match: filepath="/templates/article.html" -// /files no match, but the router would redirect -// -// The value of parameters is saved as a slice of the Param struct, consisting -// each of a key and a value. The slice is passed to the Handle func as a third -// parameter. -// There are two ways to retrieve the value of a parameter: -// // by the name of the parameter -// user := ps.ByName("user") // defined by :user or *user -// -// // by the index of the parameter. 
This way you can also get the name (key) -// thirdKey := ps[2].Key // the name of the 3rd parameter -// thirdValue := ps[2].Value // the value of the 3rd parameter -package httprouter - -import ( - "context" - "net/http" - "strings" -) - -// Handle is a function that can be registered to a route to handle HTTP -// requests. Like http.HandlerFunc, but has a third parameter for the values of -// wildcards (variables). -type Handle func(http.ResponseWriter, *http.Request, Params) - -// Param is a single URL parameter, consisting of a key and a value. -type Param struct { - Key string - Value string -} - -// Params is a Param-slice, as returned by the router. -// The slice is ordered, the first URL parameter is also the first slice value. -// It is therefore safe to read values by the index. -type Params []Param - -// ByName returns the value of the first Param which key matches the given name. -// If no matching Param is found, an empty string is returned. -func (ps Params) ByName(name string) string { - for i := range ps { - if ps[i].Key == name { - return ps[i].Value - } - } - return "" -} - -type paramsKey struct{} - -// ParamsKey is the request context key under which URL params are stored. -var ParamsKey = paramsKey{} - -// ParamsFromContext pulls the URL parameters from a request context, -// or returns nil if none are present. -func ParamsFromContext(ctx context.Context) Params { - p, _ := ctx.Value(ParamsKey).(Params) - return p -} - -// Router is a http.Handler which can be used to dispatch requests to different -// handler functions via configurable routes -type Router struct { - trees map[string]*node - - // Enables automatic redirection if the current route can't be matched but a - // handler for the path with (without) the trailing slash exists. - // For example if /foo/ is requested but a route only exists for /foo, the - // client is redirected to /foo with http status code 301 for GET requests - // and 307 for all other request methods. 
- RedirectTrailingSlash bool - - // If enabled, the router tries to fix the current request path, if no - // handle is registered for it. - // First superfluous path elements like ../ or // are removed. - // Afterwards the router does a case-insensitive lookup of the cleaned path. - // If a handle can be found for this route, the router makes a redirection - // to the corrected path with status code 301 for GET requests and 307 for - // all other request methods. - // For example /FOO and /..//Foo could be redirected to /foo. - // RedirectTrailingSlash is independent of this option. - RedirectFixedPath bool - - // If enabled, the router checks if another method is allowed for the - // current route, if the current request can not be routed. - // If this is the case, the request is answered with 'Method Not Allowed' - // and HTTP status code 405. - // If no other Method is allowed, the request is delegated to the NotFound - // handler. - HandleMethodNotAllowed bool - - // If enabled, the router automatically replies to OPTIONS requests. - // Custom OPTIONS handlers take priority over automatic replies. - HandleOPTIONS bool - - // An optional http.Handler that is called on automatic OPTIONS requests. - // The handler is only called if HandleOPTIONS is true and no OPTIONS - // handler for the specific path was set. - // The "Allowed" header is set before calling the handler. - GlobalOPTIONS http.Handler - - // Cached value of global (*) allowed methods - globalAllowed string - - // Configurable http.Handler which is called when no matching route is - // found. If it is not set, http.NotFound is used. - NotFound http.Handler - - // Configurable http.Handler which is called when a request - // cannot be routed and HandleMethodNotAllowed is true. - // If it is not set, http.Error with http.StatusMethodNotAllowed is used. - // The "Allow" header with allowed request methods is set before the handler - // is called. 
- MethodNotAllowed http.Handler - - // Function to handle panics recovered from http handlers. - // It should be used to generate a error page and return the http error code - // 500 (Internal Server Error). - // The handler can be used to keep your server from crashing because of - // unrecovered panics. - PanicHandler func(http.ResponseWriter, *http.Request, interface{}) -} - -// Make sure the Router conforms with the http.Handler interface -var _ http.Handler = New() - -// New returns a new initialized Router. -// Path auto-correction, including trailing slashes, is enabled by default. -func New() *Router { - return &Router{ - RedirectTrailingSlash: true, - RedirectFixedPath: true, - HandleMethodNotAllowed: true, - HandleOPTIONS: true, - } -} - -// GET is a shortcut for router.Handle(http.MethodGet, path, handle) -func (r *Router) GET(path string, handle Handle) { - r.Handle(http.MethodGet, path, handle) -} - -// HEAD is a shortcut for router.Handle(http.MethodHead, path, handle) -func (r *Router) HEAD(path string, handle Handle) { - r.Handle(http.MethodHead, path, handle) -} - -// OPTIONS is a shortcut for router.Handle(http.MethodOptions, path, handle) -func (r *Router) OPTIONS(path string, handle Handle) { - r.Handle(http.MethodOptions, path, handle) -} - -// POST is a shortcut for router.Handle(http.MethodPost, path, handle) -func (r *Router) POST(path string, handle Handle) { - r.Handle(http.MethodPost, path, handle) -} - -// PUT is a shortcut for router.Handle(http.MethodPut, path, handle) -func (r *Router) PUT(path string, handle Handle) { - r.Handle(http.MethodPut, path, handle) -} - -// PATCH is a shortcut for router.Handle(http.MethodPatch, path, handle) -func (r *Router) PATCH(path string, handle Handle) { - r.Handle(http.MethodPatch, path, handle) -} - -// DELETE is a shortcut for router.Handle(http.MethodDelete, path, handle) -func (r *Router) DELETE(path string, handle Handle) { - r.Handle(http.MethodDelete, path, handle) -} - -// Handle registers 
a new request handle with the given path and method. -// -// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut -// functions can be used. -// -// This function is intended for bulk loading and to allow the usage of less -// frequently used, non-standardized or custom methods (e.g. for internal -// communication with a proxy). -func (r *Router) Handle(method, path string, handle Handle) { - if len(path) < 1 || path[0] != '/' { - panic("path must begin with '/' in path '" + path + "'") - } - - if r.trees == nil { - r.trees = make(map[string]*node) - } - - root := r.trees[method] - if root == nil { - root = new(node) - r.trees[method] = root - - r.globalAllowed = r.allowed("*", "") - } - - root.addRoute(path, handle) -} - -// Handler is an adapter which allows the usage of an http.Handler as a -// request handle. -// The Params are available in the request context under ParamsKey. -func (r *Router) Handler(method, path string, handler http.Handler) { - r.Handle(method, path, - func(w http.ResponseWriter, req *http.Request, p Params) { - if len(p) > 0 { - ctx := req.Context() - ctx = context.WithValue(ctx, ParamsKey, p) - req = req.WithContext(ctx) - } - handler.ServeHTTP(w, req) - }, - ) -} - -// HandlerFunc is an adapter which allows the usage of an http.HandlerFunc as a -// request handle. -func (r *Router) HandlerFunc(method, path string, handler http.HandlerFunc) { - r.Handler(method, path, handler) -} - -// ServeFiles serves files from the given file system root. -// The path must end with "/*filepath", files are then served from the local -// path /defined/root/dir/*filepath. -// For example if root is "/etc" and *filepath is "passwd", the local file -// "/etc/passwd" would be served. -// Internally a http.FileServer is used, therefore http.NotFound is used instead -// of the Router's NotFound handler. 
-// To use the operating system's file system implementation, -// use http.Dir: -// router.ServeFiles("/src/*filepath", http.Dir("/var/www")) -func (r *Router) ServeFiles(path string, root http.FileSystem) { - if len(path) < 10 || path[len(path)-10:] != "/*filepath" { - panic("path must end with /*filepath in path '" + path + "'") - } - - fileServer := http.FileServer(root) - - r.GET(path, func(w http.ResponseWriter, req *http.Request, ps Params) { - req.URL.Path = ps.ByName("filepath") - fileServer.ServeHTTP(w, req) - }) -} - -func (r *Router) recv(w http.ResponseWriter, req *http.Request) { - if rcv := recover(); rcv != nil { - r.PanicHandler(w, req, rcv) - } -} - -// Lookup allows the manual lookup of a method + path combo. -// This is e.g. useful to build a framework around this router. -// If the path was found, it returns the handle function and the path parameter -// values. Otherwise the third return value indicates whether a redirection to -// the same path with an extra / without the trailing slash should be performed. 
-func (r *Router) Lookup(method, path string) (Handle, Params, bool) { - if root := r.trees[method]; root != nil { - return root.getValue(path) - } - return nil, nil, false -} - -func (r *Router) allowed(path, reqMethod string) (allow string) { - allowed := make([]string, 0, 9) - - if path == "*" { // server-wide - // empty method is used for internal calls to refresh the cache - if reqMethod == "" { - for method := range r.trees { - if method == http.MethodOptions { - continue - } - // Add request method to list of allowed methods - allowed = append(allowed, method) - } - } else { - return r.globalAllowed - } - } else { // specific path - for method := range r.trees { - // Skip the requested method - we already tried this one - if method == reqMethod || method == http.MethodOptions { - continue - } - - handle, _, _ := r.trees[method].getValue(path) - if handle != nil { - // Add request method to list of allowed methods - allowed = append(allowed, method) - } - } - } - - if len(allowed) > 0 { - // Add request method to list of allowed methods - allowed = append(allowed, http.MethodOptions) - - // Sort allowed methods. - // sort.Strings(allowed) unfortunately causes unnecessary allocations - // due to allowed being moved to the heap and interface conversion - for i, l := 1, len(allowed); i < l; i++ { - for j := i; j > 0 && allowed[j] < allowed[j-1]; j-- { - allowed[j], allowed[j-1] = allowed[j-1], allowed[j] - } - } - - // return as comma separated list - return strings.Join(allowed, ", ") - } - return -} - -// ServeHTTP makes the router implement the http.Handler interface. 
-func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if r.PanicHandler != nil { - defer r.recv(w, req) - } - - path := req.URL.Path - - if root := r.trees[req.Method]; root != nil { - if handle, ps, tsr := root.getValue(path); handle != nil { - handle(w, req, ps) - return - } else if req.Method != http.MethodConnect && path != "/" { - code := 301 // Permanent redirect, request with GET method - if req.Method != http.MethodGet { - // Temporary redirect, request with same method - // As of Go 1.3, Go does not support status code 308. - code = 307 - } - - if tsr && r.RedirectTrailingSlash { - if len(path) > 1 && path[len(path)-1] == '/' { - req.URL.Path = path[:len(path)-1] - } else { - req.URL.Path = path + "/" - } - http.Redirect(w, req, req.URL.String(), code) - return - } - - // Try to fix the request path - if r.RedirectFixedPath { - fixedPath, found := root.findCaseInsensitivePath( - CleanPath(path), - r.RedirectTrailingSlash, - ) - if found { - req.URL.Path = string(fixedPath) - http.Redirect(w, req, req.URL.String(), code) - return - } - } - } - } - - if req.Method == http.MethodOptions && r.HandleOPTIONS { - // Handle OPTIONS requests - if allow := r.allowed(path, http.MethodOptions); allow != "" { - w.Header().Set("Allow", allow) - if r.GlobalOPTIONS != nil { - r.GlobalOPTIONS.ServeHTTP(w, req) - } - return - } - } else if r.HandleMethodNotAllowed { // Handle 405 - if allow := r.allowed(path, req.Method); allow != "" { - w.Header().Set("Allow", allow) - if r.MethodNotAllowed != nil { - r.MethodNotAllowed.ServeHTTP(w, req) - } else { - http.Error(w, - http.StatusText(http.StatusMethodNotAllowed), - http.StatusMethodNotAllowed, - ) - } - return - } - } - - // Handle 404 - if r.NotFound != nil { - r.NotFound.ServeHTTP(w, req) - } else { - http.NotFound(w, req) - } -} diff --git a/vendor/github.com/julienschmidt/httprouter/tree.go b/vendor/github.com/julienschmidt/httprouter/tree.go deleted file mode 100644 index c9fdf5b4..00000000 --- 
a/vendor/github.com/julienschmidt/httprouter/tree.go +++ /dev/null @@ -1,666 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package httprouter - -import ( - "strings" - "unicode" - "unicode/utf8" -) - -func min(a, b int) int { - if a <= b { - return a - } - return b -} - -const maxParamCount uint8 = ^uint8(0) - -func countParams(path string) uint8 { - var n uint - for i := 0; i < len(path); i++ { - if path[i] != ':' && path[i] != '*' { - continue - } - n++ - } - if n >= uint(maxParamCount) { - return maxParamCount - } - - return uint8(n) -} - -type nodeType uint8 - -const ( - static nodeType = iota // default - root - param - catchAll -) - -type node struct { - path string - wildChild bool - nType nodeType - maxParams uint8 - priority uint32 - indices string - children []*node - handle Handle -} - -// increments priority of the given child and reorders if necessary -func (n *node) incrementChildPrio(pos int) int { - n.children[pos].priority++ - prio := n.children[pos].priority - - // adjust position (move to front) - newPos := pos - for newPos > 0 && n.children[newPos-1].priority < prio { - // swap node positions - n.children[newPos-1], n.children[newPos] = n.children[newPos], n.children[newPos-1] - - newPos-- - } - - // build new index char string - if newPos != pos { - n.indices = n.indices[:newPos] + // unchanged prefix, might be empty - n.indices[pos:pos+1] + // the index char we move - n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos' - } - - return newPos -} - -// addRoute adds a node with the given handle to the path. -// Not concurrency-safe! 
-func (n *node) addRoute(path string, handle Handle) { - fullPath := path - n.priority++ - numParams := countParams(path) - - // non-empty tree - if len(n.path) > 0 || len(n.children) > 0 { - walk: - for { - // Update maxParams of the current node - if numParams > n.maxParams { - n.maxParams = numParams - } - - // Find the longest common prefix. - // This also implies that the common prefix contains no ':' or '*' - // since the existing key can't contain those chars. - i := 0 - max := min(len(path), len(n.path)) - for i < max && path[i] == n.path[i] { - i++ - } - - // Split edge - if i < len(n.path) { - child := node{ - path: n.path[i:], - wildChild: n.wildChild, - nType: static, - indices: n.indices, - children: n.children, - handle: n.handle, - priority: n.priority - 1, - } - - // Update maxParams (max of all children) - for i := range child.children { - if child.children[i].maxParams > child.maxParams { - child.maxParams = child.children[i].maxParams - } - } - - n.children = []*node{&child} - // []byte for proper unicode char conversion, see #65 - n.indices = string([]byte{n.path[i]}) - n.path = path[:i] - n.handle = nil - n.wildChild = false - } - - // Make new node a child of this node - if i < len(path) { - path = path[i:] - - if n.wildChild { - n = n.children[0] - n.priority++ - - // Update maxParams of the child node - if numParams > n.maxParams { - n.maxParams = numParams - } - numParams-- - - // Check if the wildcard matches - if len(path) >= len(n.path) && n.path == path[:len(n.path)] && - // Adding a child to a catchAll is not possible - n.nType != catchAll && - // Check for longer wildcard, e.g. 
:name and :names - (len(n.path) >= len(path) || path[len(n.path)] == '/') { - continue walk - } else { - // Wildcard conflict - var pathSeg string - if n.nType == catchAll { - pathSeg = path - } else { - pathSeg = strings.SplitN(path, "/", 2)[0] - } - prefix := fullPath[:strings.Index(fullPath, pathSeg)] + n.path - panic("'" + pathSeg + - "' in new path '" + fullPath + - "' conflicts with existing wildcard '" + n.path + - "' in existing prefix '" + prefix + - "'") - } - } - - c := path[0] - - // slash after param - if n.nType == param && c == '/' && len(n.children) == 1 { - n = n.children[0] - n.priority++ - continue walk - } - - // Check if a child with the next path byte exists - for i := 0; i < len(n.indices); i++ { - if c == n.indices[i] { - i = n.incrementChildPrio(i) - n = n.children[i] - continue walk - } - } - - // Otherwise insert it - if c != ':' && c != '*' { - // []byte for proper unicode char conversion, see #65 - n.indices += string([]byte{c}) - child := &node{ - maxParams: numParams, - } - n.children = append(n.children, child) - n.incrementChildPrio(len(n.indices) - 1) - n = child - } - n.insertChild(numParams, path, fullPath, handle) - return - - } else if i == len(path) { // Make node a (in-path) leaf - if n.handle != nil { - panic("a handle is already registered for path '" + fullPath + "'") - } - n.handle = handle - } - return - } - } else { // Empty tree - n.insertChild(numParams, path, fullPath, handle) - n.nType = root - } -} - -func (n *node) insertChild(numParams uint8, path, fullPath string, handle Handle) { - var offset int // already handled bytes of the path - - // find prefix until first wildcard (beginning with ':'' or '*'') - for i, max := 0, len(path); numParams > 0; i++ { - c := path[i] - if c != ':' && c != '*' { - continue - } - - // find wildcard end (either '/' or path end) - end := i + 1 - for end < max && path[end] != '/' { - switch path[end] { - // the wildcard name must not contain ':' and '*' - case ':', '*': - panic("only 
one wildcard per path segment is allowed, has: '" + - path[i:] + "' in path '" + fullPath + "'") - default: - end++ - } - } - - // check if this Node existing children which would be - // unreachable if we insert the wildcard here - if len(n.children) > 0 { - panic("wildcard route '" + path[i:end] + - "' conflicts with existing children in path '" + fullPath + "'") - } - - // check if the wildcard has a name - if end-i < 2 { - panic("wildcards must be named with a non-empty name in path '" + fullPath + "'") - } - - if c == ':' { // param - // split path at the beginning of the wildcard - if i > 0 { - n.path = path[offset:i] - offset = i - } - - child := &node{ - nType: param, - maxParams: numParams, - } - n.children = []*node{child} - n.wildChild = true - n = child - n.priority++ - numParams-- - - // if the path doesn't end with the wildcard, then there - // will be another non-wildcard subpath starting with '/' - if end < max { - n.path = path[offset:end] - offset = end - - child := &node{ - maxParams: numParams, - priority: 1, - } - n.children = []*node{child} - n = child - } - - } else { // catchAll - if end != max || numParams > 1 { - panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'") - } - - if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { - panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'") - } - - // currently fixed width 1 for '/' - i-- - if path[i] != '/' { - panic("no / before catch-all in path '" + fullPath + "'") - } - - n.path = path[offset:i] - - // first node: catchAll node with empty path - child := &node{ - wildChild: true, - nType: catchAll, - maxParams: 1, - } - // update maxParams of the parent node - if n.maxParams < 1 { - n.maxParams = 1 - } - n.children = []*node{child} - n.indices = string(path[i]) - n = child - n.priority++ - - // second node: node holding the variable - child = &node{ - path: path[i:], - nType: catchAll, - maxParams: 1, - 
handle: handle, - priority: 1, - } - n.children = []*node{child} - - return - } - } - - // insert remaining path part and handle to the leaf - n.path = path[offset:] - n.handle = handle -} - -// Returns the handle registered with the given path (key). The values of -// wildcards are saved to a map. -// If no handle can be found, a TSR (trailing slash redirect) recommendation is -// made if a handle exists with an extra (without the) trailing slash for the -// given path. -func (n *node) getValue(path string) (handle Handle, p Params, tsr bool) { -walk: // outer loop for walking the tree - for { - if len(path) > len(n.path) { - if path[:len(n.path)] == n.path { - path = path[len(n.path):] - // If this node does not have a wildcard (param or catchAll) - // child, we can just look up the next child node and continue - // to walk down the tree - if !n.wildChild { - c := path[0] - for i := 0; i < len(n.indices); i++ { - if c == n.indices[i] { - n = n.children[i] - continue walk - } - } - - // Nothing found. - // We can recommend to redirect to the same URL without a - // trailing slash if a leaf exists for that path. - tsr = (path == "/" && n.handle != nil) - return - - } - - // handle wildcard child - n = n.children[0] - switch n.nType { - case param: - // find param end (either '/' or path end) - end := 0 - for end < len(path) && path[end] != '/' { - end++ - } - - // save param value - if p == nil { - // lazy allocation - p = make(Params, 0, n.maxParams) - } - i := len(p) - p = p[:i+1] // expand slice within preallocated capacity - p[i].Key = n.path[1:] - p[i].Value = path[:end] - - // we need to go deeper! - if end < len(path) { - if len(n.children) > 0 { - path = path[end:] - n = n.children[0] - continue walk - } - - // ... but we can't - tsr = (len(path) == end+1) - return - } - - if handle = n.handle; handle != nil { - return - } else if len(n.children) == 1 { - // No handle found. 
Check if a handle for this path + a - // trailing slash exists for TSR recommendation - n = n.children[0] - tsr = (n.path == "/" && n.handle != nil) - } - - return - - case catchAll: - // save param value - if p == nil { - // lazy allocation - p = make(Params, 0, n.maxParams) - } - i := len(p) - p = p[:i+1] // expand slice within preallocated capacity - p[i].Key = n.path[2:] - p[i].Value = path - - handle = n.handle - return - - default: - panic("invalid node type") - } - } - } else if path == n.path { - // We should have reached the node containing the handle. - // Check if this node has a handle registered. - if handle = n.handle; handle != nil { - return - } - - if path == "/" && n.wildChild && n.nType != root { - tsr = true - return - } - - // No handle found. Check if a handle for this path + a - // trailing slash exists for trailing slash recommendation - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == '/' { - n = n.children[i] - tsr = (len(n.path) == 1 && n.handle != nil) || - (n.nType == catchAll && n.children[0].handle != nil) - return - } - } - - return - } - - // Nothing found. We can recommend to redirect to the same URL with an - // extra trailing slash if a leaf exists for that path - tsr = (path == "/") || - (len(n.path) == len(path)+1 && n.path[len(path)] == '/' && - path == n.path[:len(n.path)-1] && n.handle != nil) - return - } -} - -// Makes a case-insensitive lookup of the given path and tries to find a handler. -// It can optionally also fix trailing slashes. -// It returns the case-corrected path and a bool indicating whether the lookup -// was successful. 
-func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) { - return n.findCaseInsensitivePathRec( - path, - make([]byte, 0, len(path)+1), // preallocate enough memory for new path - [4]byte{}, // empty rune buffer - fixTrailingSlash, - ) -} - -// shift bytes in array by n bytes left -func shiftNRuneBytes(rb [4]byte, n int) [4]byte { - switch n { - case 0: - return rb - case 1: - return [4]byte{rb[1], rb[2], rb[3], 0} - case 2: - return [4]byte{rb[2], rb[3]} - case 3: - return [4]byte{rb[3]} - default: - return [4]byte{} - } -} - -// recursive case-insensitive lookup function used by n.findCaseInsensitivePath -func (n *node) findCaseInsensitivePathRec(path string, ciPath []byte, rb [4]byte, fixTrailingSlash bool) ([]byte, bool) { - npLen := len(n.path) - -walk: // outer loop for walking the tree - for len(path) >= npLen && (npLen == 0 || strings.EqualFold(path[1:npLen], n.path[1:])) { - // add common prefix to result - - oldPath := path - path = path[npLen:] - ciPath = append(ciPath, n.path...) 
- - if len(path) > 0 { - // If this node does not have a wildcard (param or catchAll) child, - // we can just look up the next child node and continue to walk down - // the tree - if !n.wildChild { - // skip rune bytes already processed - rb = shiftNRuneBytes(rb, npLen) - - if rb[0] != 0 { - // old rune not finished - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == rb[0] { - // continue with child node - n = n.children[i] - npLen = len(n.path) - continue walk - } - } - } else { - // process a new rune - var rv rune - - // find rune start - // runes are up to 4 byte long, - // -4 would definitely be another rune - var off int - for max := min(npLen, 3); off < max; off++ { - if i := npLen - off; utf8.RuneStart(oldPath[i]) { - // read rune from cached path - rv, _ = utf8.DecodeRuneInString(oldPath[i:]) - break - } - } - - // calculate lowercase bytes of current rune - lo := unicode.ToLower(rv) - utf8.EncodeRune(rb[:], lo) - - // skip already processed bytes - rb = shiftNRuneBytes(rb, off) - - for i := 0; i < len(n.indices); i++ { - // lowercase matches - if n.indices[i] == rb[0] { - // must use a recursive approach since both the - // uppercase byte and the lowercase byte might exist - // as an index - if out, found := n.children[i].findCaseInsensitivePathRec( - path, ciPath, rb, fixTrailingSlash, - ); found { - return out, true - } - break - } - } - - // if we found no match, the same for the uppercase rune, - // if it differs - if up := unicode.ToUpper(rv); up != lo { - utf8.EncodeRune(rb[:], up) - rb = shiftNRuneBytes(rb, off) - - for i, c := 0, rb[0]; i < len(n.indices); i++ { - // uppercase matches - if n.indices[i] == c { - // continue with child node - n = n.children[i] - npLen = len(n.path) - continue walk - } - } - } - } - - // Nothing found. 
We can recommend to redirect to the same URL - // without a trailing slash if a leaf exists for that path - return ciPath, (fixTrailingSlash && path == "/" && n.handle != nil) - } - - n = n.children[0] - switch n.nType { - case param: - // find param end (either '/' or path end) - k := 0 - for k < len(path) && path[k] != '/' { - k++ - } - - // add param value to case insensitive path - ciPath = append(ciPath, path[:k]...) - - // we need to go deeper! - if k < len(path) { - if len(n.children) > 0 { - // continue with child node - n = n.children[0] - npLen = len(n.path) - path = path[k:] - continue - } - - // ... but we can't - if fixTrailingSlash && len(path) == k+1 { - return ciPath, true - } - return ciPath, false - } - - if n.handle != nil { - return ciPath, true - } else if fixTrailingSlash && len(n.children) == 1 { - // No handle found. Check if a handle for this path + a - // trailing slash exists - n = n.children[0] - if n.path == "/" && n.handle != nil { - return append(ciPath, '/'), true - } - } - return ciPath, false - - case catchAll: - return append(ciPath, path...), true - - default: - panic("invalid node type") - } - } else { - // We should have reached the node containing the handle. - // Check if this node has a handle registered. - if n.handle != nil { - return ciPath, true - } - - // No handle found. - // Try to fix the path by adding a trailing slash - if fixTrailingSlash { - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == '/' { - n = n.children[i] - if (len(n.path) == 1 && n.handle != nil) || - (n.nType == catchAll && n.children[0].handle != nil) { - return append(ciPath, '/'), true - } - return ciPath, false - } - } - } - return ciPath, false - } - } - - // Nothing found. 
- // Try to fix the path by adding / removing a trailing slash - if fixTrailingSlash { - if path == "/" { - return ciPath, true - } - if len(path)+1 == npLen && n.path[len(path)] == '/' && - strings.EqualFold(path[1:], n.path[1:len(path)]) && n.handle != nil { - return append(ciPath, n.path...), true - } - } - return ciPath, false -} diff --git a/vendor/github.com/kballard/go-shellquote/LICENSE b/vendor/github.com/kballard/go-shellquote/LICENSE deleted file mode 100644 index a6d77312..00000000 --- a/vendor/github.com/kballard/go-shellquote/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014 Kevin Ballard - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the "Software"), -to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, -and/or sell copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/kballard/go-shellquote/README b/vendor/github.com/kballard/go-shellquote/README deleted file mode 100644 index 4d34e87a..00000000 --- a/vendor/github.com/kballard/go-shellquote/README +++ /dev/null @@ -1,36 +0,0 @@ -PACKAGE - -package shellquote - import "github.com/kballard/go-shellquote" - - Shellquote provides utilities for joining/splitting strings using sh's - word-splitting rules. - -VARIABLES - -var ( - UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string") - UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string") - UnterminatedEscapeError = errors.New("Unterminated backslash-escape") -) - - -FUNCTIONS - -func Join(args ...string) string - Join quotes each argument and joins them with a space. If passed to - /bin/sh, the resulting string will be split back into the original - arguments. - -func Split(input string) (words []string, err error) - Split splits a string according to /bin/sh's word-splitting rules. It - supports backslash-escapes, single-quotes, and double-quotes. Notably it - does not support the $'' style of quoting. It also doesn't attempt to - perform any other sort of expansion, including brace expansion, shell - expansion, or pathname expansion. - - If the given input has an unterminated quoted string or ends in a - backslash-escape, one of UnterminatedSingleQuoteError, - UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned. - - diff --git a/vendor/github.com/kballard/go-shellquote/doc.go b/vendor/github.com/kballard/go-shellquote/doc.go deleted file mode 100644 index 9445fa4a..00000000 --- a/vendor/github.com/kballard/go-shellquote/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Shellquote provides utilities for joining/splitting strings using sh's -// word-splitting rules. 
-package shellquote diff --git a/vendor/github.com/kballard/go-shellquote/quote.go b/vendor/github.com/kballard/go-shellquote/quote.go deleted file mode 100644 index 72a8cb38..00000000 --- a/vendor/github.com/kballard/go-shellquote/quote.go +++ /dev/null @@ -1,102 +0,0 @@ -package shellquote - -import ( - "bytes" - "strings" - "unicode/utf8" -) - -// Join quotes each argument and joins them with a space. -// If passed to /bin/sh, the resulting string will be split back into the -// original arguments. -func Join(args ...string) string { - var buf bytes.Buffer - for i, arg := range args { - if i != 0 { - buf.WriteByte(' ') - } - quote(arg, &buf) - } - return buf.String() -} - -const ( - specialChars = "\\'\"`${[|&;<>()*?!" - extraSpecialChars = " \t\n" - prefixChars = "~" -) - -func quote(word string, buf *bytes.Buffer) { - // We want to try to produce a "nice" output. As such, we will - // backslash-escape most characters, but if we encounter a space, or if we - // encounter an extra-special char (which doesn't work with - // backslash-escaping) we switch over to quoting the whole word. We do this - // with a space because it's typically easier for people to read multi-word - // arguments when quoted with a space rather than with ugly backslashes - // everywhere. 
- origLen := buf.Len() - - if len(word) == 0 { - // oops, no content - buf.WriteString("''") - return - } - - cur, prev := word, word - atStart := true - for len(cur) > 0 { - c, l := utf8.DecodeRuneInString(cur) - cur = cur[l:] - if strings.ContainsRune(specialChars, c) || (atStart && strings.ContainsRune(prefixChars, c)) { - // copy the non-special chars up to this point - if len(cur) < len(prev) { - buf.WriteString(prev[0 : len(prev)-len(cur)-l]) - } - buf.WriteByte('\\') - buf.WriteRune(c) - prev = cur - } else if strings.ContainsRune(extraSpecialChars, c) { - // start over in quote mode - buf.Truncate(origLen) - goto quote - } - atStart = false - } - if len(prev) > 0 { - buf.WriteString(prev) - } - return - -quote: - // quote mode - // Use single-quotes, but if we find a single-quote in the word, we need - // to terminate the string, emit an escaped quote, and start the string up - // again - inQuote := false - for len(word) > 0 { - i := strings.IndexRune(word, '\'') - if i == -1 { - break - } - if i > 0 { - if !inQuote { - buf.WriteByte('\'') - inQuote = true - } - buf.WriteString(word[0:i]) - } - word = word[i+1:] - if inQuote { - buf.WriteByte('\'') - inQuote = false - } - buf.WriteString("\\'") - } - if len(word) > 0 { - if !inQuote { - buf.WriteByte('\'') - } - buf.WriteString(word) - buf.WriteByte('\'') - } -} diff --git a/vendor/github.com/kballard/go-shellquote/unquote.go b/vendor/github.com/kballard/go-shellquote/unquote.go deleted file mode 100644 index b1b13da9..00000000 --- a/vendor/github.com/kballard/go-shellquote/unquote.go +++ /dev/null @@ -1,156 +0,0 @@ -package shellquote - -import ( - "bytes" - "errors" - "strings" - "unicode/utf8" -) - -var ( - UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string") - UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string") - UnterminatedEscapeError = errors.New("Unterminated backslash-escape") -) - -var ( - splitChars = " \n\t" - singleChar = '\'' - doubleChar = 
'"' - escapeChar = '\\' - doubleEscapeChars = "$`\"\n\\" -) - -// Split splits a string according to /bin/sh's word-splitting rules. It -// supports backslash-escapes, single-quotes, and double-quotes. Notably it does -// not support the $'' style of quoting. It also doesn't attempt to perform any -// other sort of expansion, including brace expansion, shell expansion, or -// pathname expansion. -// -// If the given input has an unterminated quoted string or ends in a -// backslash-escape, one of UnterminatedSingleQuoteError, -// UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned. -func Split(input string) (words []string, err error) { - var buf bytes.Buffer - words = make([]string, 0) - - for len(input) > 0 { - // skip any splitChars at the start - c, l := utf8.DecodeRuneInString(input) - if strings.ContainsRune(splitChars, c) { - input = input[l:] - continue - } else if c == escapeChar { - // Look ahead for escaped newline so we can skip over it - next := input[l:] - if len(next) == 0 { - err = UnterminatedEscapeError - return - } - c2, l2 := utf8.DecodeRuneInString(next) - if c2 == '\n' { - input = next[l2:] - continue - } - } - - var word string - word, input, err = splitWord(input, &buf) - if err != nil { - return - } - words = append(words, word) - } - return -} - -func splitWord(input string, buf *bytes.Buffer) (word string, remainder string, err error) { - buf.Reset() - -raw: - { - cur := input - for len(cur) > 0 { - c, l := utf8.DecodeRuneInString(cur) - cur = cur[l:] - if c == singleChar { - buf.WriteString(input[0 : len(input)-len(cur)-l]) - input = cur - goto single - } else if c == doubleChar { - buf.WriteString(input[0 : len(input)-len(cur)-l]) - input = cur - goto double - } else if c == escapeChar { - buf.WriteString(input[0 : len(input)-len(cur)-l]) - input = cur - goto escape - } else if strings.ContainsRune(splitChars, c) { - buf.WriteString(input[0 : len(input)-len(cur)-l]) - return buf.String(), cur, nil - } - } - if 
len(input) > 0 { - buf.WriteString(input) - input = "" - } - goto done - } - -escape: - { - if len(input) == 0 { - return "", "", UnterminatedEscapeError - } - c, l := utf8.DecodeRuneInString(input) - if c == '\n' { - // a backslash-escaped newline is elided from the output entirely - } else { - buf.WriteString(input[:l]) - } - input = input[l:] - } - goto raw - -single: - { - i := strings.IndexRune(input, singleChar) - if i == -1 { - return "", "", UnterminatedSingleQuoteError - } - buf.WriteString(input[0:i]) - input = input[i+1:] - goto raw - } - -double: - { - cur := input - for len(cur) > 0 { - c, l := utf8.DecodeRuneInString(cur) - cur = cur[l:] - if c == doubleChar { - buf.WriteString(input[0 : len(input)-len(cur)-l]) - input = cur - goto raw - } else if c == escapeChar { - // bash only supports certain escapes in double-quoted strings - c2, l2 := utf8.DecodeRuneInString(cur) - cur = cur[l2:] - if strings.ContainsRune(doubleEscapeChars, c2) { - buf.WriteString(input[0 : len(input)-len(cur)-l-l2]) - if c2 == '\n' { - // newline is special, skip the backslash entirely - } else { - buf.WriteRune(c2) - } - input = cur - } - } - } - return "", "", UnterminatedDoubleQuoteError - } - -done: - return buf.String(), input, nil -} diff --git a/vendor/github.com/kr/fs/LICENSE b/vendor/github.com/kr/fs/LICENSE deleted file mode 100644 index 74487567..00000000 --- a/vendor/github.com/kr/fs/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. 
- * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/kr/fs/Readme b/vendor/github.com/kr/fs/Readme deleted file mode 100644 index c95e13fc..00000000 --- a/vendor/github.com/kr/fs/Readme +++ /dev/null @@ -1,3 +0,0 @@ -Filesystem Package - -http://godoc.org/github.com/kr/fs diff --git a/vendor/github.com/kr/fs/filesystem.go b/vendor/github.com/kr/fs/filesystem.go deleted file mode 100644 index f1c4805f..00000000 --- a/vendor/github.com/kr/fs/filesystem.go +++ /dev/null @@ -1,36 +0,0 @@ -package fs - -import ( - "io/ioutil" - "os" - "path/filepath" -) - -// FileSystem defines the methods of an abstract filesystem. -type FileSystem interface { - - // ReadDir reads the directory named by dirname and returns a - // list of directory entries. - ReadDir(dirname string) ([]os.FileInfo, error) - - // Lstat returns a FileInfo describing the named file. If the file is a - // symbolic link, the returned FileInfo describes the symbolic link. Lstat - // makes no attempt to follow the link. 
- Lstat(name string) (os.FileInfo, error) - - // Join joins any number of path elements into a single path, adding a - // separator if necessary. The result is Cleaned; in particular, all - // empty strings are ignored. - // - // The separator is FileSystem specific. - Join(elem ...string) string -} - -// fs represents a FileSystem provided by the os package. -type fs struct{} - -func (f *fs) ReadDir(dirname string) ([]os.FileInfo, error) { return ioutil.ReadDir(dirname) } - -func (f *fs) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } - -func (f *fs) Join(elem ...string) string { return filepath.Join(elem...) } diff --git a/vendor/github.com/kr/fs/walk.go b/vendor/github.com/kr/fs/walk.go deleted file mode 100644 index 6ffa1e0b..00000000 --- a/vendor/github.com/kr/fs/walk.go +++ /dev/null @@ -1,95 +0,0 @@ -// Package fs provides filesystem-related functions. -package fs - -import ( - "os" -) - -// Walker provides a convenient interface for iterating over the -// descendants of a filesystem path. -// Successive calls to the Step method will step through each -// file or directory in the tree, including the root. The files -// are walked in lexical order, which makes the output deterministic -// but means that for very large directories Walker can be inefficient. -// Walker does not follow symbolic links. -type Walker struct { - fs FileSystem - cur item - stack []item - descend bool -} - -type item struct { - path string - info os.FileInfo - err error -} - -// Walk returns a new Walker rooted at root. -func Walk(root string) *Walker { - return WalkFS(root, new(fs)) -} - -// WalkFS returns a new Walker rooted at root on the FileSystem fs. -func WalkFS(root string, fs FileSystem) *Walker { - info, err := fs.Lstat(root) - return &Walker{ - fs: fs, - stack: []item{{root, info, err}}, - } -} - -// Step advances the Walker to the next file or directory, -// which will then be available through the Path, Stat, -// and Err methods. 
-// It returns false when the walk stops at the end of the tree. -func (w *Walker) Step() bool { - if w.descend && w.cur.err == nil && w.cur.info.IsDir() { - list, err := w.fs.ReadDir(w.cur.path) - if err != nil { - w.cur.err = err - w.stack = append(w.stack, w.cur) - } else { - for i := len(list) - 1; i >= 0; i-- { - path := w.fs.Join(w.cur.path, list[i].Name()) - w.stack = append(w.stack, item{path, list[i], nil}) - } - } - } - - if len(w.stack) == 0 { - return false - } - i := len(w.stack) - 1 - w.cur = w.stack[i] - w.stack = w.stack[:i] - w.descend = true - return true -} - -// Path returns the path to the most recent file or directory -// visited by a call to Step. It contains the argument to Walk -// as a prefix; that is, if Walk is called with "dir", which is -// a directory containing the file "a", Path will return "dir/a". -func (w *Walker) Path() string { - return w.cur.path -} - -// Stat returns info for the most recent file or directory -// visited by a call to Step. -func (w *Walker) Stat() os.FileInfo { - return w.cur.info -} - -// Err returns the error, if any, for the most recent attempt -// by Step to visit a file or directory. If a directory has -// an error, w will not descend into that directory. -func (w *Walker) Err() error { - return w.cur.err -} - -// SkipDir causes the currently visited directory to be skipped. -// If w is not on a directory, SkipDir has no effect. -func (w *Walker) SkipDir() { - w.descend = false -} diff --git a/vendor/github.com/lxc/lxd/AUTHORS b/vendor/github.com/lxc/lxd/AUTHORS deleted file mode 100644 index f7c0c6a2..00000000 --- a/vendor/github.com/lxc/lxd/AUTHORS +++ /dev/null @@ -1,5 +0,0 @@ -Unless mentioned otherwise in a specific file's header, all code in this -project is released under the Apache 2.0 license. - -The list of authors and contributors can be retrieved from the git -commit history and in some cases, the file headers. 
diff --git a/vendor/github.com/lxc/lxd/COPYING b/vendor/github.com/lxc/lxd/COPYING deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/lxc/lxd/COPYING +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/lxc/lxd/client/connection.go b/vendor/github.com/lxc/lxd/client/connection.go deleted file mode 100644 index 895a0f81..00000000 --- a/vendor/github.com/lxc/lxd/client/connection.go +++ /dev/null @@ -1,358 +0,0 @@ -package lxd - -import ( - "context" - "crypto/sha256" - "fmt" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - "time" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery" - "github.com/gorilla/websocket" - - "github.com/lxc/lxd/shared" - "github.com/lxc/lxd/shared/logger" - "github.com/lxc/lxd/shared/simplestreams" -) - -// ConnectionArgs represents a set of common connection properties. -type ConnectionArgs struct { - // TLS certificate of the remote server. If not specified, the system CA is used. 
- TLSServerCert string - - // TLS certificate to use for client authentication. - TLSClientCert string - - // TLS key to use for client authentication. - TLSClientKey string - - // TLS CA to validate against when in PKI mode. - TLSCA string - - // User agent string - UserAgent string - - // Authentication type - AuthType string - - // Authentication interactor - AuthInteractor []httpbakery.Interactor - - // Custom proxy - Proxy func(*http.Request) (*url.URL, error) - - // Custom HTTP Client (used as base for the connection) - HTTPClient *http.Client - - // TransportWrapper wraps the *http.Transport set by lxd - TransportWrapper func(*http.Transport) HTTPTransporter - - // Controls whether a client verifies the server's certificate chain and host name. - InsecureSkipVerify bool - - // Cookie jar - CookieJar http.CookieJar - - // Skip automatic GetServer request upon connection - SkipGetServer bool - - // Caching support for image servers - CachePath string - CacheExpiry time.Duration -} - -// ConnectLXD lets you connect to a remote LXD daemon over HTTPs. -// -// A client certificate (TLSClientCert) and key (TLSClientKey) must be provided. -// -// If connecting to a LXD daemon running in PKI mode, the PKI CA (TLSCA) must also be provided. -// -// Unless the remote server is trusted by the system CA, the remote certificate must be provided (TLSServerCert). -func ConnectLXD(url string, args *ConnectionArgs) (InstanceServer, error) { - return ConnectLXDWithContext(context.Background(), url, args) -} - -// ConnectLXDWithContext lets you connect to a remote LXD daemon over HTTPs with context.Context. -// -// A client certificate (TLSClientCert) and key (TLSClientKey) must be provided. -// -// If connecting to a LXD daemon running in PKI mode, the PKI CA (TLSCA) must also be provided. -// -// Unless the remote server is trusted by the system CA, the remote certificate must be provided (TLSServerCert). 
-func ConnectLXDWithContext(ctx context.Context, url string, args *ConnectionArgs) (InstanceServer, error) { - // Cleanup URL - url = strings.TrimSuffix(url, "/") - - logger.Debug("Connecting to a remote LXD over HTTPS", logger.Ctx{"url": url}) - - return httpsLXD(ctx, url, args) -} - -// ConnectLXDHTTP lets you connect to a VM agent over a VM socket. -func ConnectLXDHTTP(args *ConnectionArgs, client *http.Client) (InstanceServer, error) { - return ConnectLXDHTTPWithContext(context.Background(), args, client) -} - -// ConnectLXDHTTPWithContext lets you connect to a VM agent over a VM socket with context.Context. -func ConnectLXDHTTPWithContext(ctx context.Context, args *ConnectionArgs, client *http.Client) (InstanceServer, error) { - logger.Debug("Connecting to a VM agent over a VM socket") - - // Use empty args if not specified - if args == nil { - args = &ConnectionArgs{} - } - - httpBaseURL, err := url.Parse("https://custom.socket") - if err != nil { - return nil, err - } - - ctxConnected, ctxConnectedCancel := context.WithCancel(context.Background()) - - // Initialize the client struct - server := ProtocolLXD{ - ctx: ctx, - httpBaseURL: *httpBaseURL, - httpProtocol: "custom", - httpUserAgent: args.UserAgent, - ctxConnected: ctxConnected, - ctxConnectedCancel: ctxConnectedCancel, - eventConns: make(map[string]*websocket.Conn), - eventListeners: make(map[string][]*EventListener), - } - - // Setup the HTTP client - server.http = client - - // Test the connection and seed the server information - if !args.SkipGetServer { - serverStatus, _, err := server.GetServer() - if err != nil { - return nil, err - } - - // Record the server certificate - server.httpCertificate = serverStatus.Environment.Certificate - } - - return &server, nil -} - -// ConnectLXDUnix lets you connect to a remote LXD daemon over a local unix socket. 
-// -// If the path argument is empty, then $LXD_SOCKET will be used, if -// unset $LXD_DIR/unix.socket will be used and if that one isn't set -// either, then the path will default to /var/lib/lxd/unix.socket. -func ConnectLXDUnix(path string, args *ConnectionArgs) (InstanceServer, error) { - return ConnectLXDUnixWithContext(context.Background(), path, args) -} - -// ConnectLXDUnixWithContext lets you connect to a remote LXD daemon over a local unix socket with context.Context. -// -// If the path argument is empty, then $LXD_SOCKET will be used, if -// unset $LXD_DIR/unix.socket will be used and if that one isn't set -// either, then the path will default to /var/lib/lxd/unix.socket. -func ConnectLXDUnixWithContext(ctx context.Context, path string, args *ConnectionArgs) (InstanceServer, error) { - logger.Debug("Connecting to a local LXD over a Unix socket") - - // Use empty args if not specified - if args == nil { - args = &ConnectionArgs{} - } - - httpBaseURL, err := url.Parse("http://unix.socket") - if err != nil { - return nil, err - } - - ctxConnected, ctxConnectedCancel := context.WithCancel(context.Background()) - - // Initialize the client struct - server := ProtocolLXD{ - ctx: ctx, - httpBaseURL: *httpBaseURL, - httpUnixPath: path, - httpProtocol: "unix", - httpUserAgent: args.UserAgent, - ctxConnected: ctxConnected, - ctxConnectedCancel: ctxConnectedCancel, - eventConns: make(map[string]*websocket.Conn), - eventListeners: make(map[string][]*EventListener), - } - - // Determine the socket path - if path == "" { - path = os.Getenv("LXD_SOCKET") - if path == "" { - lxdDir := os.Getenv("LXD_DIR") - if lxdDir == "" { - lxdDir = "/var/lib/lxd" - } - - path = filepath.Join(lxdDir, "unix.socket") - } - } - - path = shared.HostPath(path) - - // Setup the HTTP client - httpClient, err := unixHTTPClient(args, path) - if err != nil { - return nil, err - } - - server.http = httpClient - - // Test the connection and seed the server information - if !args.SkipGetServer 
{ - serverStatus, _, err := server.GetServer() - if err != nil { - return nil, err - } - - // Record the server certificate - server.httpCertificate = serverStatus.Environment.Certificate - } - - return &server, nil -} - -// ConnectPublicLXD lets you connect to a remote public LXD daemon over HTTPs. -// -// Unless the remote server is trusted by the system CA, the remote certificate must be provided (TLSServerCert). -func ConnectPublicLXD(url string, args *ConnectionArgs) (ImageServer, error) { - return ConnectPublicLXDWithContext(context.Background(), url, args) -} - -// ConnectPublicLXDWithContext lets you connect to a remote public LXD daemon over HTTPs with context.Context. -// -// Unless the remote server is trusted by the system CA, the remote certificate must be provided (TLSServerCert). -func ConnectPublicLXDWithContext(ctx context.Context, url string, args *ConnectionArgs) (ImageServer, error) { - logger.Debug("Connecting to a remote public LXD over HTTPS") - - // Cleanup URL - url = strings.TrimSuffix(url, "/") - - return httpsLXD(ctx, url, args) -} - -// ConnectSimpleStreams lets you connect to a remote SimpleStreams image server over HTTPs. -// -// Unless the remote server is trusted by the system CA, the remote certificate must be provided (TLSServerCert). 
-func ConnectSimpleStreams(url string, args *ConnectionArgs) (ImageServer, error) { - logger.Debug("Connecting to a remote simplestreams server", logger.Ctx{"URL": url}) - - // Cleanup URL - url = strings.TrimSuffix(url, "/") - - // Use empty args if not specified - if args == nil { - args = &ConnectionArgs{} - } - - // Initialize the client struct - server := ProtocolSimpleStreams{ - httpHost: url, - httpUserAgent: args.UserAgent, - httpCertificate: args.TLSServerCert, - } - - // Setup the HTTP client - httpClient, err := tlsHTTPClient(args.HTTPClient, args.TLSClientCert, args.TLSClientKey, args.TLSCA, args.TLSServerCert, args.InsecureSkipVerify, args.Proxy, args.TransportWrapper) - if err != nil { - return nil, err - } - - server.http = httpClient - - // Get simplestreams client - ssClient := simplestreams.NewClient(url, *httpClient, args.UserAgent) - server.ssClient = ssClient - - // Setup the cache - if args.CachePath != "" { - if !shared.PathExists(args.CachePath) { - return nil, fmt.Errorf("Cache directory %q doesn't exist", args.CachePath) - } - - hashedURL := fmt.Sprintf("%x", sha256.Sum256([]byte(url))) - - cachePath := filepath.Join(args.CachePath, hashedURL) - cacheExpiry := args.CacheExpiry - if cacheExpiry == 0 { - cacheExpiry = time.Hour - } - - if !shared.PathExists(cachePath) { - err := os.Mkdir(cachePath, 0755) - if err != nil { - return nil, err - } - } - - ssClient.SetCache(cachePath, cacheExpiry) - } - - return &server, nil -} - -// Internal function called by ConnectLXD and ConnectPublicLXD. 
-func httpsLXD(ctx context.Context, requestURL string, args *ConnectionArgs) (InstanceServer, error) { - // Use empty args if not specified - if args == nil { - args = &ConnectionArgs{} - } - - httpBaseURL, err := url.Parse(requestURL) - if err != nil { - return nil, err - } - - ctxConnected, ctxConnectedCancel := context.WithCancel(context.Background()) - - // Initialize the client struct - server := ProtocolLXD{ - ctx: ctx, - httpCertificate: args.TLSServerCert, - httpBaseURL: *httpBaseURL, - httpProtocol: "https", - httpUserAgent: args.UserAgent, - bakeryInteractor: args.AuthInteractor, - ctxConnected: ctxConnected, - ctxConnectedCancel: ctxConnectedCancel, - eventConns: make(map[string]*websocket.Conn), - eventListeners: make(map[string][]*EventListener), - } - - if args.AuthType == "candid" { - server.RequireAuthenticated(true) - } - - // Setup the HTTP client - httpClient, err := tlsHTTPClient(args.HTTPClient, args.TLSClientCert, args.TLSClientKey, args.TLSCA, args.TLSServerCert, args.InsecureSkipVerify, args.Proxy, args.TransportWrapper) - if err != nil { - return nil, err - } - - if args.CookieJar != nil { - httpClient.Jar = args.CookieJar - } - - server.http = httpClient - if args.AuthType == "candid" { - server.setupBakeryClient() - } - - // Test the connection and seed the server information - if !args.SkipGetServer { - _, _, err := server.GetServer() - if err != nil { - return nil, err - } - } - return &server, nil -} diff --git a/vendor/github.com/lxc/lxd/client/doc.go b/vendor/github.com/lxc/lxd/client/doc.go deleted file mode 100644 index dce659cd..00000000 --- a/vendor/github.com/lxc/lxd/client/doc.go +++ /dev/null @@ -1,146 +0,0 @@ -// Package lxd implements a client for the LXD API -// -// # Overview -// -// This package lets you connect to LXD daemons or SimpleStream image -// servers over a Unix socket or HTTPs. You can then interact with those -// remote servers, creating instances, images, moving them around, ... 
-// -// # Example - instance creation -// -// This creates a container on a local LXD daemon and then starts it. -// -// // Connect to LXD over the Unix socket -// c, err := lxd.ConnectLXDUnix("", nil) -// if err != nil { -// return err -// } -// -// // Instance creation request -// req := api.InstancesPost{ -// Name: "my-container", -// Source: api.InstanceSource{ -// Type: "image", -// Alias: "my-image", -// }, -// Type: "container" -// } -// -// // Get LXD to create the instance (background operation) -// op, err := c.CreateInstance(req) -// if err != nil { -// return err -// } -// -// // Wait for the operation to complete -// err = op.Wait() -// if err != nil { -// return err -// } -// -// // Get LXD to start the instance (background operation) -// reqState := api.InstanceStatePut{ -// Action: "start", -// Timeout: -1, -// } -// -// op, err = c.UpdateInstanceState(name, reqState, "") -// if err != nil { -// return err -// } -// -// // Wait for the operation to complete -// err = op.Wait() -// if err != nil { -// return err -// } -// -// # Example - command execution -// -// This executes an interactive bash terminal -// -// // Connect to LXD over the Unix socket -// c, err := lxd.ConnectLXDUnix("", nil) -// if err != nil { -// return err -// } -// -// // Setup the exec request -// req := api.InstanceExecPost{ -// Command: []string{"bash"}, -// WaitForWS: true, -// Interactive: true, -// Width: 80, -// Height: 15, -// } -// -// // Setup the exec arguments (fds) -// args := lxd.InstanceExecArgs{ -// Stdin: os.Stdin, -// Stdout: os.Stdout, -// Stderr: os.Stderr, -// } -// -// // Setup the terminal (set to raw mode) -// if req.Interactive { -// cfd := int(syscall.Stdin) -// oldttystate, err := termios.MakeRaw(cfd) -// if err != nil { -// return err -// } -// -// defer termios.Restore(cfd, oldttystate) -// } -// -// // Get the current state -// op, err := c.ExecInstance("c1", req, &args) -// if err != nil { -// return err -// } -// -// // Wait for it to complete -// 
err = op.Wait() -// if err != nil { -// return err -// } -// -// # Example - image copy -// -// This copies an image from a simplestreams server to a local LXD daemon -// -// // Connect to LXD over the Unix socket -// c, err := lxd.ConnectLXDUnix("", nil) -// if err != nil { -// return err -// } -// -// // Connect to the remote SimpleStreams server -// d, err = lxd.ConnectSimpleStreams("https://images.linuxcontainers.org", nil) -// if err != nil { -// return err -// } -// -// // Resolve the alias -// alias, _, err := d.GetImageAlias("centos/7") -// if err != nil { -// return err -// } -// -// // Get the image information -// image, _, err := d.GetImage(alias.Target) -// if err != nil { -// return err -// } -// -// // Ask LXD to copy the image from the remote server -// op, err := d.CopyImage(*image, c, nil) -// if err != nil { -// return err -// } -// -// // And wait for it to finish -// err = op.Wait() -// if err != nil { -// return err -// } -package lxd diff --git a/vendor/github.com/lxc/lxd/client/events.go b/vendor/github.com/lxc/lxd/client/events.go deleted file mode 100644 index d2cf5d74..00000000 --- a/vendor/github.com/lxc/lxd/client/events.go +++ /dev/null @@ -1,109 +0,0 @@ -package lxd - -import ( - "context" - "fmt" - "sync" - - "github.com/lxc/lxd/shared/api" -) - -// The EventListener struct is used to interact with a LXD event stream. -type EventListener struct { - r *ProtocolLXD - ctx context.Context - ctxCancel context.CancelFunc - err error - - // projectName stores which project this event listener is associated with (empty for all projects). - projectName string - targets []*EventTarget - targetsLock sync.Mutex -} - -// The EventTarget struct is returned to the caller of AddHandler and used in RemoveHandler. -type EventTarget struct { - function func(api.Event) - types []string -} - -// AddHandler adds a function to be called whenever an event is received. 
-func (e *EventListener) AddHandler(types []string, function func(api.Event)) (*EventTarget, error) { - if function == nil { - return nil, fmt.Errorf("A valid function must be provided") - } - - // Handle locking - e.targetsLock.Lock() - defer e.targetsLock.Unlock() - - // Create a new target - target := EventTarget{ - function: function, - types: types, - } - - // And add it to the targets - e.targets = append(e.targets, &target) - - return &target, nil -} - -// RemoveHandler removes a function to be called whenever an event is received. -func (e *EventListener) RemoveHandler(target *EventTarget) error { - if target == nil { - return fmt.Errorf("A valid event target must be provided") - } - - // Handle locking - e.targetsLock.Lock() - defer e.targetsLock.Unlock() - - // Locate and remove the function from the list - for i, entry := range e.targets { - if entry == target { - copy(e.targets[i:], e.targets[i+1:]) - e.targets[len(e.targets)-1] = nil - e.targets = e.targets[:len(e.targets)-1] - return nil - } - } - - return fmt.Errorf("Couldn't find this function and event types combination") -} - -// Disconnect must be used once done listening for events. -func (e *EventListener) Disconnect() { - // Handle locking - e.r.eventListenersLock.Lock() - defer e.r.eventListenersLock.Unlock() - - if e.ctx.Err() != nil { - return - } - - // Locate and remove it from the global list - for i, listener := range e.r.eventListeners[e.projectName] { - if listener == e { - copy(e.r.eventListeners[e.projectName][i:], e.r.eventListeners[e.projectName][i+1:]) - e.r.eventListeners[e.projectName][len(e.r.eventListeners[e.projectName])-1] = nil - e.r.eventListeners[e.projectName] = e.r.eventListeners[e.projectName][:len(e.r.eventListeners[e.projectName])-1] - break - } - } - - // Turn off the handler - e.err = nil - e.ctxCancel() -} - -// Wait blocks until the server disconnects the connection or Disconnect() is called. 
-func (e *EventListener) Wait() error { - <-e.ctx.Done() - return e.err -} - -// IsActive returns true if this listener is still connected, false otherwise. -func (e *EventListener) IsActive() bool { - return e.ctx.Err() == nil -} diff --git a/vendor/github.com/lxc/lxd/client/interfaces.go b/vendor/github.com/lxc/lxd/client/interfaces.go deleted file mode 100644 index 81408261..00000000 --- a/vendor/github.com/lxc/lxd/client/interfaces.go +++ /dev/null @@ -1,686 +0,0 @@ -package lxd - -import ( - "context" - "io" - "net" - "net/http" - - "github.com/gorilla/websocket" - "github.com/pkg/sftp" - - "github.com/lxc/lxd/shared/api" - "github.com/lxc/lxd/shared/cancel" - "github.com/lxc/lxd/shared/ioprogress" -) - -// The Operation type represents a currently running operation. -type Operation interface { - AddHandler(function func(api.Operation)) (target *EventTarget, err error) - Cancel() (err error) - Get() (op api.Operation) - GetWebsocket(secret string) (conn *websocket.Conn, err error) - RemoveHandler(target *EventTarget) (err error) - Refresh() (err error) - Wait() (err error) - WaitContext(ctx context.Context) error -} - -// The RemoteOperation type represents an Operation that may be using multiple servers. -type RemoteOperation interface { - AddHandler(function func(api.Operation)) (target *EventTarget, err error) - CancelTarget() (err error) - GetTarget() (op *api.Operation, err error) - Wait() (err error) -} - -// The Server type represents a generic read-only server. -type Server interface { - GetConnectionInfo() (info *ConnectionInfo, err error) - GetHTTPClient() (client *http.Client, err error) - DoHTTP(req *http.Request) (resp *http.Response, err error) - Disconnect() -} - -// The ImageServer type represents a read-only image server. 
-type ImageServer interface { - Server - - // Image handling functions - GetImages() (images []api.Image, err error) - GetImageFingerprints() (fingerprints []string, err error) - GetImagesWithFilter(filters []string) (images []api.Image, err error) - - GetImage(fingerprint string) (image *api.Image, ETag string, err error) - GetImageFile(fingerprint string, req ImageFileRequest) (resp *ImageFileResponse, err error) - GetImageSecret(fingerprint string) (secret string, err error) - - GetPrivateImage(fingerprint string, secret string) (image *api.Image, ETag string, err error) - GetPrivateImageFile(fingerprint string, secret string, req ImageFileRequest) (resp *ImageFileResponse, err error) - - GetImageAliases() (aliases []api.ImageAliasesEntry, err error) - GetImageAliasNames() (names []string, err error) - - GetImageAlias(name string) (alias *api.ImageAliasesEntry, ETag string, err error) - GetImageAliasType(imageType string, name string) (alias *api.ImageAliasesEntry, ETag string, err error) - GetImageAliasArchitectures(imageType string, name string) (entries map[string]*api.ImageAliasesEntry, err error) - - ExportImage(fingerprint string, image api.ImageExportPost) (Operation, error) -} - -// The InstanceServer type represents a full featured LXD server. 
-type InstanceServer interface { - ImageServer - - // Server functions - GetMetrics() (metrics string, err error) - GetServer() (server *api.Server, ETag string, err error) - GetServerResources() (resources *api.Resources, err error) - UpdateServer(server api.ServerPut, ETag string) (err error) - HasExtension(extension string) (exists bool) - RequireAuthenticated(authenticated bool) - IsClustered() (clustered bool) - UseTarget(name string) (client InstanceServer) - UseProject(name string) (client InstanceServer) - - // Certificate functions - GetCertificateFingerprints() (fingerprints []string, err error) - GetCertificates() (certificates []api.Certificate, err error) - GetCertificate(fingerprint string) (certificate *api.Certificate, ETag string, err error) - CreateCertificate(certificate api.CertificatesPost) (err error) - UpdateCertificate(fingerprint string, certificate api.CertificatePut, ETag string) (err error) - DeleteCertificate(fingerprint string) (err error) - CreateCertificateToken(certificate api.CertificatesPost) (op Operation, err error) - - // Container functions - // - // Deprecated: Those functions are deprecated and won't be updated anymore. - // Please use the equivalent Instance function instead. 
- GetContainerNames() (names []string, err error) - GetContainers() (containers []api.Container, err error) - GetContainersFull() (containers []api.ContainerFull, err error) - GetContainer(name string) (container *api.Container, ETag string, err error) - CreateContainer(container api.ContainersPost) (op Operation, err error) - CreateContainerFromImage(source ImageServer, image api.Image, imgcontainer api.ContainersPost) (op RemoteOperation, err error) - CopyContainer(source InstanceServer, container api.Container, args *ContainerCopyArgs) (op RemoteOperation, err error) - UpdateContainer(name string, container api.ContainerPut, ETag string) (op Operation, err error) - RenameContainer(name string, container api.ContainerPost) (op Operation, err error) - MigrateContainer(name string, container api.ContainerPost) (op Operation, err error) - DeleteContainer(name string) (op Operation, err error) - - ExecContainer(containerName string, exec api.ContainerExecPost, args *ContainerExecArgs) (op Operation, err error) - ConsoleContainer(containerName string, console api.ContainerConsolePost, args *ContainerConsoleArgs) (op Operation, err error) - GetContainerConsoleLog(containerName string, args *ContainerConsoleLogArgs) (content io.ReadCloser, err error) - DeleteContainerConsoleLog(containerName string, args *ContainerConsoleLogArgs) (err error) - - GetContainerFile(containerName string, path string) (content io.ReadCloser, resp *ContainerFileResponse, err error) - CreateContainerFile(containerName string, path string, args ContainerFileArgs) (err error) - DeleteContainerFile(containerName string, path string) (err error) - - GetContainerSnapshotNames(containerName string) (names []string, err error) - GetContainerSnapshots(containerName string) (snapshots []api.ContainerSnapshot, err error) - GetContainerSnapshot(containerName string, name string) (snapshot *api.ContainerSnapshot, ETag string, err error) - CreateContainerSnapshot(containerName string, snapshot 
api.ContainerSnapshotsPost) (op Operation, err error) - CopyContainerSnapshot(source InstanceServer, containerName string, snapshot api.ContainerSnapshot, args *ContainerSnapshotCopyArgs) (op RemoteOperation, err error) - RenameContainerSnapshot(containerName string, name string, container api.ContainerSnapshotPost) (op Operation, err error) - MigrateContainerSnapshot(containerName string, name string, container api.ContainerSnapshotPost) (op Operation, err error) - DeleteContainerSnapshot(containerName string, name string) (op Operation, err error) - UpdateContainerSnapshot(containerName string, name string, container api.ContainerSnapshotPut, ETag string) (op Operation, err error) - - GetContainerBackupNames(containerName string) (names []string, err error) - GetContainerBackups(containername string) (backups []api.ContainerBackup, err error) - GetContainerBackup(containerName string, name string) (backup *api.ContainerBackup, ETag string, err error) - CreateContainerBackup(containerName string, backup api.ContainerBackupsPost) (op Operation, err error) - RenameContainerBackup(containerName string, name string, backup api.ContainerBackupPost) (op Operation, err error) - DeleteContainerBackup(containerName string, name string) (op Operation, err error) - GetContainerBackupFile(containerName string, name string, req *BackupFileRequest) (resp *BackupFileResponse, err error) - CreateContainerFromBackup(args ContainerBackupArgs) (op Operation, err error) - - GetContainerState(name string) (state *api.ContainerState, ETag string, err error) - UpdateContainerState(name string, state api.ContainerStatePut, ETag string) (op Operation, err error) - - GetContainerLogfiles(name string) (logfiles []string, err error) - GetContainerLogfile(name string, filename string) (content io.ReadCloser, err error) - DeleteContainerLogfile(name string, filename string) (err error) - - GetContainerMetadata(name string) (metadata *api.ImageMetadata, ETag string, err error) - 
SetContainerMetadata(name string, metadata api.ImageMetadata, ETag string) (err error) - - GetContainerTemplateFiles(containerName string) (templates []string, err error) - GetContainerTemplateFile(containerName string, templateName string) (content io.ReadCloser, err error) - CreateContainerTemplateFile(containerName string, templateName string, content io.ReadSeeker) (err error) - UpdateContainerTemplateFile(containerName string, templateName string, content io.ReadSeeker) (err error) - DeleteContainerTemplateFile(name string, templateName string) (err error) - - // Instance functions. - GetInstanceNames(instanceType api.InstanceType) (names []string, err error) - GetInstanceNamesAllProjects(instanceType api.InstanceType) (names map[string][]string, err error) - GetInstances(instanceType api.InstanceType) (instances []api.Instance, err error) - GetInstancesFull(instanceType api.InstanceType) (instances []api.InstanceFull, err error) - GetInstancesAllProjects(instanceType api.InstanceType) (instances []api.Instance, err error) - GetInstancesFullAllProjects(instanceType api.InstanceType) (instances []api.InstanceFull, err error) - GetInstancesWithFilter(instanceType api.InstanceType, filters []string) (instances []api.Instance, err error) - GetInstancesFullWithFilter(instanceType api.InstanceType, filters []string) (instances []api.InstanceFull, err error) - GetInstancesAllProjectsWithFilter(instanceType api.InstanceType, filters []string) (instances []api.Instance, err error) - GetInstancesFullAllProjectsWithFilter(instanceType api.InstanceType, filters []string) (instances []api.InstanceFull, err error) - GetInstance(name string) (instance *api.Instance, ETag string, err error) - GetInstanceFull(name string) (instance *api.InstanceFull, ETag string, err error) - CreateInstance(instance api.InstancesPost) (op Operation, err error) - CreateInstanceFromImage(source ImageServer, image api.Image, req api.InstancesPost) (op RemoteOperation, err error) - 
CopyInstance(source InstanceServer, instance api.Instance, args *InstanceCopyArgs) (op RemoteOperation, err error) - UpdateInstance(name string, instance api.InstancePut, ETag string) (op Operation, err error) - RenameInstance(name string, instance api.InstancePost) (op Operation, err error) - MigrateInstance(name string, instance api.InstancePost) (op Operation, err error) - DeleteInstance(name string) (op Operation, err error) - UpdateInstances(state api.InstancesPut, ETag string) (op Operation, err error) - - ExecInstance(instanceName string, exec api.InstanceExecPost, args *InstanceExecArgs) (op Operation, err error) - ConsoleInstance(instanceName string, console api.InstanceConsolePost, args *InstanceConsoleArgs) (op Operation, err error) - ConsoleInstanceDynamic(instanceName string, console api.InstanceConsolePost, args *InstanceConsoleArgs) (Operation, func(io.ReadWriteCloser) error, error) - - GetInstanceConsoleLog(instanceName string, args *InstanceConsoleLogArgs) (content io.ReadCloser, err error) - DeleteInstanceConsoleLog(instanceName string, args *InstanceConsoleLogArgs) (err error) - - GetInstanceFile(instanceName string, path string) (content io.ReadCloser, resp *InstanceFileResponse, err error) - CreateInstanceFile(instanceName string, path string, args InstanceFileArgs) (err error) - DeleteInstanceFile(instanceName string, path string) (err error) - - GetInstanceFileSFTPConn(instanceName string) (net.Conn, error) - GetInstanceFileSFTP(instanceName string) (*sftp.Client, error) - - GetInstanceSnapshotNames(instanceName string) (names []string, err error) - GetInstanceSnapshots(instanceName string) (snapshots []api.InstanceSnapshot, err error) - GetInstanceSnapshot(instanceName string, name string) (snapshot *api.InstanceSnapshot, ETag string, err error) - CreateInstanceSnapshot(instanceName string, snapshot api.InstanceSnapshotsPost) (op Operation, err error) - CopyInstanceSnapshot(source InstanceServer, instanceName string, snapshot 
api.InstanceSnapshot, args *InstanceSnapshotCopyArgs) (op RemoteOperation, err error) - RenameInstanceSnapshot(instanceName string, name string, instance api.InstanceSnapshotPost) (op Operation, err error) - MigrateInstanceSnapshot(instanceName string, name string, instance api.InstanceSnapshotPost) (op Operation, err error) - DeleteInstanceSnapshot(instanceName string, name string) (op Operation, err error) - UpdateInstanceSnapshot(instanceName string, name string, instance api.InstanceSnapshotPut, ETag string) (op Operation, err error) - - GetInstanceBackupNames(instanceName string) (names []string, err error) - GetInstanceBackups(instanceName string) (backups []api.InstanceBackup, err error) - GetInstanceBackup(instanceName string, name string) (backup *api.InstanceBackup, ETag string, err error) - CreateInstanceBackup(instanceName string, backup api.InstanceBackupsPost) (op Operation, err error) - RenameInstanceBackup(instanceName string, name string, backup api.InstanceBackupPost) (op Operation, err error) - DeleteInstanceBackup(instanceName string, name string) (op Operation, err error) - GetInstanceBackupFile(instanceName string, name string, req *BackupFileRequest) (resp *BackupFileResponse, err error) - CreateInstanceFromBackup(args InstanceBackupArgs) (op Operation, err error) - - GetInstanceState(name string) (state *api.InstanceState, ETag string, err error) - UpdateInstanceState(name string, state api.InstanceStatePut, ETag string) (op Operation, err error) - - GetInstanceLogfiles(name string) (logfiles []string, err error) - GetInstanceLogfile(name string, filename string) (content io.ReadCloser, err error) - DeleteInstanceLogfile(name string, filename string) (err error) - - GetInstanceMetadata(name string) (metadata *api.ImageMetadata, ETag string, err error) - UpdateInstanceMetadata(name string, metadata api.ImageMetadata, ETag string) (err error) - - GetInstanceTemplateFiles(instanceName string) (templates []string, err error) - 
GetInstanceTemplateFile(instanceName string, templateName string) (content io.ReadCloser, err error) - CreateInstanceTemplateFile(instanceName string, templateName string, content io.ReadSeeker) (err error) - DeleteInstanceTemplateFile(name string, templateName string) (err error) - - // Event handling functions - GetEvents() (listener *EventListener, err error) - GetEventsAllProjects() (listener *EventListener, err error) - SendEvent(event api.Event) error - - // Image functions - CreateImage(image api.ImagesPost, args *ImageCreateArgs) (op Operation, err error) - CopyImage(source ImageServer, image api.Image, args *ImageCopyArgs) (op RemoteOperation, err error) - UpdateImage(fingerprint string, image api.ImagePut, ETag string) (err error) - DeleteImage(fingerprint string) (op Operation, err error) - RefreshImage(fingerprint string) (op Operation, err error) - CreateImageSecret(fingerprint string) (op Operation, err error) - CreateImageAlias(alias api.ImageAliasesPost) (err error) - UpdateImageAlias(name string, alias api.ImageAliasesEntryPut, ETag string) (err error) - RenameImageAlias(name string, alias api.ImageAliasesEntryPost) (err error) - DeleteImageAlias(name string) (err error) - - // Network functions ("network" API extension) - GetNetworkNames() (names []string, err error) - GetNetworks() (networks []api.Network, err error) - GetNetwork(name string) (network *api.Network, ETag string, err error) - GetNetworkLeases(name string) (leases []api.NetworkLease, err error) - GetNetworkState(name string) (state *api.NetworkState, err error) - CreateNetwork(network api.NetworksPost) (err error) - UpdateNetwork(name string, network api.NetworkPut, ETag string) (err error) - RenameNetwork(name string, network api.NetworkPost) (err error) - DeleteNetwork(name string) (err error) - - // Network forward functions ("network_forward" API extension) - GetNetworkForwardAddresses(networkName string) ([]string, error) - GetNetworkForwards(networkName string) 
([]api.NetworkForward, error) - GetNetworkForward(networkName string, listenAddress string) (forward *api.NetworkForward, ETag string, err error) - CreateNetworkForward(networkName string, forward api.NetworkForwardsPost) error - UpdateNetworkForward(networkName string, listenAddress string, forward api.NetworkForwardPut, ETag string) (err error) - DeleteNetworkForward(networkName string, listenAddress string) (err error) - - // Network load balancer functions ("network_load_balancer" API extension) - GetNetworkLoadBalancerAddresses(networkName string) ([]string, error) - GetNetworkLoadBalancers(networkName string) ([]api.NetworkLoadBalancer, error) - GetNetworkLoadBalancer(networkName string, listenAddress string) (forward *api.NetworkLoadBalancer, ETag string, err error) - CreateNetworkLoadBalancer(networkName string, forward api.NetworkLoadBalancersPost) error - UpdateNetworkLoadBalancer(networkName string, listenAddress string, forward api.NetworkLoadBalancerPut, ETag string) (err error) - DeleteNetworkLoadBalancer(networkName string, listenAddress string) (err error) - - // Network peer functions ("network_peer" API extension) - GetNetworkPeerNames(networkName string) ([]string, error) - GetNetworkPeers(networkName string) ([]api.NetworkPeer, error) - GetNetworkPeer(networkName string, peerName string) (peer *api.NetworkPeer, ETag string, err error) - CreateNetworkPeer(networkName string, peer api.NetworkPeersPost) error - UpdateNetworkPeer(networkName string, peerName string, peer api.NetworkPeerPut, ETag string) (err error) - DeleteNetworkPeer(networkName string, peerName string) (err error) - - // Network ACL functions ("network_acl" API extension) - GetNetworkACLNames() (names []string, err error) - GetNetworkACLs() (acls []api.NetworkACL, err error) - GetNetworkACL(name string) (acl *api.NetworkACL, ETag string, err error) - GetNetworkACLLogfile(name string) (log io.ReadCloser, err error) - CreateNetworkACL(acl api.NetworkACLsPost) (err error) - 
UpdateNetworkACL(name string, acl api.NetworkACLPut, ETag string) (err error) - RenameNetworkACL(name string, acl api.NetworkACLPost) (err error) - DeleteNetworkACL(name string) (err error) - - // Network zone functions ("network_dns" API extension) - GetNetworkZoneNames() (names []string, err error) - GetNetworkZones() (zones []api.NetworkZone, err error) - GetNetworkZone(name string) (zone *api.NetworkZone, ETag string, err error) - CreateNetworkZone(zone api.NetworkZonesPost) (err error) - UpdateNetworkZone(name string, zone api.NetworkZonePut, ETag string) (err error) - DeleteNetworkZone(name string) (err error) - - GetNetworkZoneRecordNames(zone string) (names []string, err error) - GetNetworkZoneRecords(zone string) (records []api.NetworkZoneRecord, err error) - GetNetworkZoneRecord(zone string, name string) (record *api.NetworkZoneRecord, ETag string, err error) - CreateNetworkZoneRecord(zone string, record api.NetworkZoneRecordsPost) (err error) - UpdateNetworkZoneRecord(zone string, name string, record api.NetworkZoneRecordPut, ETag string) (err error) - DeleteNetworkZoneRecord(zone string, name string) (err error) - - // Operation functions - GetOperationUUIDs() (uuids []string, err error) - GetOperations() (operations []api.Operation, err error) - GetOperation(uuid string) (op *api.Operation, ETag string, err error) - GetOperationWait(uuid string, timeout int) (op *api.Operation, ETag string, err error) - GetOperationWaitSecret(uuid string, secret string, timeout int) (op *api.Operation, ETag string, err error) - GetOperationWebsocket(uuid string, secret string) (conn *websocket.Conn, err error) - DeleteOperation(uuid string) (err error) - - // Profile functions - GetProfileNames() (names []string, err error) - GetProfiles() (profiles []api.Profile, err error) - GetProfile(name string) (profile *api.Profile, ETag string, err error) - CreateProfile(profile api.ProfilesPost) (err error) - UpdateProfile(name string, profile api.ProfilePut, ETag string) (err 
error) - RenameProfile(name string, profile api.ProfilePost) (err error) - DeleteProfile(name string) (err error) - - // Project functions - GetProjectNames() (names []string, err error) - GetProjects() (projects []api.Project, err error) - GetProject(name string) (project *api.Project, ETag string, err error) - GetProjectState(name string) (project *api.ProjectState, err error) - CreateProject(project api.ProjectsPost) (err error) - UpdateProject(name string, project api.ProjectPut, ETag string) (err error) - RenameProject(name string, project api.ProjectPost) (op Operation, err error) - DeleteProject(name string) (err error) - - // Storage pool functions ("storage" API extension) - GetStoragePoolNames() (names []string, err error) - GetStoragePools() (pools []api.StoragePool, err error) - GetStoragePool(name string) (pool *api.StoragePool, ETag string, err error) - GetStoragePoolResources(name string) (resources *api.ResourcesStoragePool, err error) - CreateStoragePool(pool api.StoragePoolsPost) (err error) - UpdateStoragePool(name string, pool api.StoragePoolPut, ETag string) (err error) - DeleteStoragePool(name string) (err error) - - // Storage bucket functions ("storage_buckets" API extension) - GetStoragePoolBucketNames(poolName string) ([]string, error) - GetStoragePoolBuckets(poolName string) ([]api.StorageBucket, error) - GetStoragePoolBucket(poolName string, bucketName string) (bucket *api.StorageBucket, ETag string, err error) - CreateStoragePoolBucket(poolName string, bucket api.StorageBucketsPost) (*api.StorageBucketKey, error) - UpdateStoragePoolBucket(poolName string, bucketName string, bucket api.StorageBucketPut, ETag string) (err error) - DeleteStoragePoolBucket(poolName string, bucketName string) (err error) - GetStoragePoolBucketKeyNames(poolName string, bucketName string) ([]string, error) - GetStoragePoolBucketKeys(poolName string, bucketName string) ([]api.StorageBucketKey, error) - GetStoragePoolBucketKey(poolName string, bucketName string, 
keyName string) (key *api.StorageBucketKey, ETag string, err error) - CreateStoragePoolBucketKey(poolName string, bucketName string, key api.StorageBucketKeysPost) (newKey *api.StorageBucketKey, err error) - UpdateStoragePoolBucketKey(poolName string, bucketName string, keyName string, key api.StorageBucketKeyPut, ETag string) (err error) - DeleteStoragePoolBucketKey(poolName string, bucketName string, keyName string) (err error) - - // Storage volume functions ("storage" API extension) - GetStoragePoolVolumeNames(pool string) (names []string, err error) - GetStoragePoolVolumeNamesAllProjects(pool string) (names []string, err error) - GetStoragePoolVolumes(pool string) (volumes []api.StorageVolume, err error) - GetStoragePoolVolumesAllProjects(pool string) (volumes []api.StorageVolume, err error) - GetStoragePoolVolumesWithFilter(pool string, filters []string) (volumes []api.StorageVolume, err error) - GetStoragePoolVolumesWithFilterAllProjects(pool string, filters []string) (volumes []api.StorageVolume, err error) - GetStoragePoolVolume(pool string, volType string, name string) (volume *api.StorageVolume, ETag string, err error) - GetStoragePoolVolumeState(pool string, volType string, name string) (state *api.StorageVolumeState, err error) - CreateStoragePoolVolume(pool string, volume api.StorageVolumesPost) (err error) - UpdateStoragePoolVolume(pool string, volType string, name string, volume api.StorageVolumePut, ETag string) (err error) - DeleteStoragePoolVolume(pool string, volType string, name string) (err error) - RenameStoragePoolVolume(pool string, volType string, name string, volume api.StorageVolumePost) (err error) - CopyStoragePoolVolume(pool string, source InstanceServer, sourcePool string, volume api.StorageVolume, args *StoragePoolVolumeCopyArgs) (op RemoteOperation, err error) - MoveStoragePoolVolume(pool string, source InstanceServer, sourcePool string, volume api.StorageVolume, args *StoragePoolVolumeMoveArgs) (op RemoteOperation, err error) - 
MigrateStoragePoolVolume(pool string, volume api.StorageVolumePost) (op Operation, err error) - - // Storage volume snapshot functions ("storage_api_volume_snapshots" API extension) - CreateStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshot api.StorageVolumeSnapshotsPost) (op Operation, err error) - DeleteStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string) (op Operation, err error) - GetStoragePoolVolumeSnapshotNames(pool string, volumeType string, volumeName string) (names []string, err error) - GetStoragePoolVolumeSnapshots(pool string, volumeType string, volumeName string) (snapshots []api.StorageVolumeSnapshot, err error) - GetStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string) (snapshot *api.StorageVolumeSnapshot, ETag string, err error) - RenameStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string, snapshot api.StorageVolumeSnapshotPost) (op Operation, err error) - UpdateStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string, volume api.StorageVolumeSnapshotPut, ETag string) (err error) - - // Storage volume backup functions ("custom_volume_backup" API extension) - GetStoragePoolVolumeBackupNames(pool string, volName string) (names []string, err error) - GetStoragePoolVolumeBackups(pool string, volName string) (backups []api.StoragePoolVolumeBackup, err error) - GetStoragePoolVolumeBackup(pool string, volName string, name string) (backup *api.StoragePoolVolumeBackup, ETag string, err error) - CreateStoragePoolVolumeBackup(pool string, volName string, backup api.StoragePoolVolumeBackupsPost) (op Operation, err error) - RenameStoragePoolVolumeBackup(pool string, volName string, name string, backup api.StoragePoolVolumeBackupPost) (op Operation, err error) - DeleteStoragePoolVolumeBackup(pool string, volName string, name string) (op Operation, err error) 
- GetStoragePoolVolumeBackupFile(pool string, volName string, name string, req *BackupFileRequest) (resp *BackupFileResponse, err error) - CreateStoragePoolVolumeFromBackup(pool string, args StoragePoolVolumeBackupArgs) (op Operation, err error) - - // Cluster functions ("cluster" API extensions) - GetCluster() (cluster *api.Cluster, ETag string, err error) - UpdateCluster(cluster api.ClusterPut, ETag string) (op Operation, err error) - DeleteClusterMember(name string, force bool) (err error) - GetClusterMemberNames() (names []string, err error) - GetClusterMembers() (members []api.ClusterMember, err error) - GetClusterMember(name string) (member *api.ClusterMember, ETag string, err error) - UpdateClusterMember(name string, member api.ClusterMemberPut, ETag string) (err error) - RenameClusterMember(name string, member api.ClusterMemberPost) (err error) - CreateClusterMember(member api.ClusterMembersPost) (op Operation, err error) - UpdateClusterCertificate(certs api.ClusterCertificatePut, ETag string) (err error) - GetClusterMemberState(name string) (*api.ClusterMemberState, string, error) - UpdateClusterMemberState(name string, state api.ClusterMemberStatePost) (op Operation, err error) - GetClusterGroups() ([]api.ClusterGroup, error) - GetClusterGroupNames() ([]string, error) - RenameClusterGroup(name string, group api.ClusterGroupPost) error - CreateClusterGroup(group api.ClusterGroupsPost) error - DeleteClusterGroup(name string) error - UpdateClusterGroup(name string, group api.ClusterGroupPut, ETag string) error - GetClusterGroup(name string) (*api.ClusterGroup, string, error) - - // Warning functions - GetWarningUUIDs() (uuids []string, err error) - GetWarnings() (warnings []api.Warning, err error) - GetWarning(UUID string) (warning *api.Warning, ETag string, err error) - UpdateWarning(UUID string, warning api.WarningPut, ETag string) (err error) - DeleteWarning(UUID string) (err error) - - // Internal functions (for internal use) - RawQuery(method string, 
path string, data any, queryETag string) (resp *api.Response, ETag string, err error) - RawWebsocket(path string) (conn *websocket.Conn, err error) - RawOperation(method string, path string, data any, queryETag string) (op Operation, ETag string, err error) -} - -// The ConnectionInfo struct represents general information for a connection. -type ConnectionInfo struct { - Addresses []string - Certificate string - Protocol string - URL string - SocketPath string - Project string - Target string -} - -// The BackupFileRequest struct is used for a backup download request. -type BackupFileRequest struct { - // Writer for the backup file - BackupFile io.WriteSeeker - - // Progress handler (called whenever some progress is made) - ProgressHandler func(progress ioprogress.ProgressData) - - // A canceler that can be used to interrupt some part of the image download request - Canceler *cancel.HTTPRequestCanceller -} - -// The BackupFileResponse struct is used as the response for backup downloads. -type BackupFileResponse struct { - // Size of backup file - Size int64 -} - -// The ImageCreateArgs struct is used for direct image upload. -type ImageCreateArgs struct { - // Reader for the meta file - MetaFile io.Reader - - // Filename for the meta file - MetaName string - - // Reader for the rootfs file - RootfsFile io.Reader - - // Filename for the rootfs file - RootfsName string - - // Progress handler (called with upload progress) - ProgressHandler func(progress ioprogress.ProgressData) - - // Type of the image (container or virtual-machine) - Type string -} - -// The ImageFileRequest struct is used for an image download request. 
-type ImageFileRequest struct { - // Writer for the metadata file - MetaFile io.WriteSeeker - - // Writer for the rootfs file - RootfsFile io.WriteSeeker - - // Progress handler (called whenever some progress is made) - ProgressHandler func(progress ioprogress.ProgressData) - - // A canceler that can be used to interrupt some part of the image download request - Canceler *cancel.HTTPRequestCanceller - - // Path retriever for image delta downloads - // If set, it must return the path to the image file or an empty string if not available - DeltaSourceRetriever func(fingerprint string, file string) string -} - -// The ImageFileResponse struct is used as the response for image downloads. -type ImageFileResponse struct { - // Filename for the metadata file - MetaName string - - // Size of the metadata file - MetaSize int64 - - // Filename for the rootfs file - RootfsName string - - // Size of the rootfs file - RootfsSize int64 -} - -// The ImageCopyArgs struct is used to pass additional options during image copy. -type ImageCopyArgs struct { - // Aliases to add to the copied image. - Aliases []api.ImageAlias - - // Whether to have LXD keep this image up to date - AutoUpdate bool - - // Whether to copy the source image aliases to the target - CopyAliases bool - - // Whether this image is to be made available to unauthenticated users - Public bool - - // The image type to use for resolution - Type string - - // The transfer mode, can be "pull" (default), "push" or "relay" - Mode string - - // List of profiles to apply on the target. - Profiles []string -} - -// The StoragePoolVolumeCopyArgs struct is used to pass additional options -// during storage volume copy. 
-type StoragePoolVolumeCopyArgs struct { - // New name for the target - Name string - - // The transfer mode, can be "pull" (default), "push" or "relay" - Mode string - - // API extension: storage_api_volume_snapshots - VolumeOnly bool - - // API extension: custom_volume_refresh - Refresh bool -} - -// The StoragePoolVolumeMoveArgs struct is used to pass additional options -// during storage volume move. -type StoragePoolVolumeMoveArgs struct { - StoragePoolVolumeCopyArgs - - // API extension: storage_volume_project_move - Project string -} - -// The StoragePoolVolumeBackupArgs struct is used when creating a storage volume from a backup. -// API extension: custom_volume_backup. -type StoragePoolVolumeBackupArgs struct { - // The backup file - BackupFile io.Reader - - // Name to import backup as - Name string -} - -// The InstanceBackupArgs struct is used when creating a instance from a backup. -type InstanceBackupArgs struct { - // The backup file - BackupFile io.Reader - - // Storage pool to use - PoolName string - - // Name to import backup as - Name string -} - -// The InstanceCopyArgs struct is used to pass additional options during instance copy. -type InstanceCopyArgs struct { - // If set, the instance will be renamed on copy - Name string - - // If set, the instance running state will be transferred (live migration) - Live bool - - // If set, only the instance will copied, its snapshots won't - InstanceOnly bool - - // The transfer mode, can be "pull" (default), "push" or "relay" - Mode string - - // API extension: container_incremental_copy - // Perform an incremental copy - Refresh bool - - // API extension: instance_allow_inconsistent_copy - AllowInconsistent bool -} - -// The InstanceSnapshotCopyArgs struct is used to pass additional options during instance copy. 
-type InstanceSnapshotCopyArgs struct { - // If set, the instance will be renamed on copy - Name string - - // The transfer mode, can be "pull" (default), "push" or "relay" - Mode string - - // API extension: container_snapshot_stateful_migration - // If set, the instance running state will be transferred (live migration) - Live bool -} - -// The InstanceConsoleArgs struct is used to pass additional options during a -// instance console session. -type InstanceConsoleArgs struct { - // Bidirectional fd to pass to the instance - Terminal io.ReadWriteCloser - - // Control message handler (window resize) - Control func(conn *websocket.Conn) - - // Closing this Channel causes a disconnect from the instance's console - ConsoleDisconnect chan bool -} - -// The InstanceConsoleLogArgs struct is used to pass additional options during a -// instance console log request. -type InstanceConsoleLogArgs struct { -} - -// The InstanceExecArgs struct is used to pass additional options during instance exec. -type InstanceExecArgs struct { - // Standard input - Stdin io.ReadCloser - - // Standard output - Stdout io.WriteCloser - - // Standard error - Stderr io.WriteCloser - - // Control message handler (window resize, signals, ...) - Control func(conn *websocket.Conn) - - // Channel that will be closed when all data operations are done - DataDone chan bool -} - -// The InstanceFileArgs struct is used to pass the various options for a instance file upload. -type InstanceFileArgs struct { - // File content - Content io.ReadSeeker - - // User id that owns the file - UID int64 - - // Group id that owns the file - GID int64 - - // File permissions - Mode int - - // File type (file or directory) - Type string - - // File write mode (overwrite or append) - WriteMode string -} - -// The InstanceFileResponse struct is used as part of the response for a instance file download. 
-type InstanceFileResponse struct { - // User id that owns the file - UID int64 - - // Group id that owns the file - GID int64 - - // File permissions - Mode int - - // File type (file or directory) - Type string - - // If a directory, the list of files inside it - Entries []string -} diff --git a/vendor/github.com/lxc/lxd/client/interfaces_legacy.go b/vendor/github.com/lxc/lxd/client/interfaces_legacy.go deleted file mode 100644 index 1e1ded9b..00000000 --- a/vendor/github.com/lxc/lxd/client/interfaces_legacy.go +++ /dev/null @@ -1,126 +0,0 @@ -package lxd - -import ( - "io" - - "github.com/gorilla/websocket" -) - -// The ContainerServer type is the legacy name for InstanceServer. -type ContainerServer InstanceServer - -// The ContainerBackupArgs struct is used when creating a container from a backup. -type ContainerBackupArgs struct { - // The backup file - BackupFile io.Reader - - // Storage pool to use - PoolName string -} - -// The ContainerCopyArgs struct is used to pass additional options during container copy. -type ContainerCopyArgs struct { - // If set, the container will be renamed on copy - Name string - - // If set, the container running state will be transferred (live migration) - Live bool - - // If set, only the container will copied, its snapshots won't - ContainerOnly bool - - // The transfer mode, can be "pull" (default), "push" or "relay" - Mode string - - // API extension: container_incremental_copy - // Perform an incremental copy - Refresh bool -} - -// The ContainerSnapshotCopyArgs struct is used to pass additional options during container copy. 
-type ContainerSnapshotCopyArgs struct { - // If set, the container will be renamed on copy - Name string - - // The transfer mode, can be "pull" (default), "push" or "relay" - Mode string - - // API extension: container_snapshot_stateful_migration - // If set, the container running state will be transferred (live migration) - Live bool -} - -// The ContainerConsoleArgs struct is used to pass additional options during a -// container console session. -type ContainerConsoleArgs struct { - // Bidirectional fd to pass to the container - Terminal io.ReadWriteCloser - - // Control message handler (window resize) - Control func(conn *websocket.Conn) - - // Closing this Channel causes a disconnect from the container's console - ConsoleDisconnect chan bool -} - -// The ContainerConsoleLogArgs struct is used to pass additional options during a -// container console log request. -type ContainerConsoleLogArgs struct { -} - -// The ContainerExecArgs struct is used to pass additional options during container exec. -type ContainerExecArgs struct { - // Standard input - Stdin io.ReadCloser - - // Standard output - Stdout io.WriteCloser - - // Standard error - Stderr io.WriteCloser - - // Control message handler (window resize, signals, ...) - Control func(conn *websocket.Conn) - - // Channel that will be closed when all data operations are done - DataDone chan bool -} - -// The ContainerFileArgs struct is used to pass the various options for a container file upload. -type ContainerFileArgs struct { - // File content - Content io.ReadSeeker - - // User id that owns the file - UID int64 - - // Group id that owns the file - GID int64 - - // File permissions - Mode int - - // File type (file or directory) - Type string - - // File write mode (overwrite or append) - WriteMode string -} - -// The ContainerFileResponse struct is used as part of the response for a container file download. 
-type ContainerFileResponse struct { - // User id that owns the file - UID int64 - - // Group id that owns the file - GID int64 - - // File permissions - Mode int - - // File type (file or directory) - Type string - - // If a directory, the list of files inside it - Entries []string -} diff --git a/vendor/github.com/lxc/lxd/client/lxd.go b/vendor/github.com/lxc/lxd/client/lxd.go deleted file mode 100644 index 25903298..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd.go +++ /dev/null @@ -1,493 +0,0 @@ -package lxd - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - neturl "net/url" - "strings" - "sync" - "time" - - "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery" - "github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery" - "github.com/gorilla/websocket" - - "github.com/lxc/lxd/shared" - "github.com/lxc/lxd/shared/api" - "github.com/lxc/lxd/shared/logger" - "github.com/lxc/lxd/shared/tcp" -) - -// ProtocolLXD represents a LXD API server. -type ProtocolLXD struct { - ctx context.Context - server *api.Server - ctxConnected context.Context - ctxConnectedCancel context.CancelFunc - - // eventConns contains event listener connections associated to a project name (or empty for all projects). - eventConns map[string]*websocket.Conn - - // eventConnsLock controls write access to the eventConns. - eventConnsLock sync.Mutex - - // eventListeners is a slice of event listeners associated to a project name (or empty for all projects). - eventListeners map[string][]*EventListener - eventListenersLock sync.Mutex - - http *http.Client - httpCertificate string - httpBaseURL neturl.URL - httpUnixPath string - httpProtocol string - httpUserAgent string - - bakeryClient *httpbakery.Client - bakeryInteractor []httpbakery.Interactor - requireAuthenticated bool - - clusterTarget string - project string -} - -// Disconnect gets rid of any background goroutines. 
-func (r *ProtocolLXD) Disconnect() { - if r.ctxConnected.Err() != nil { - r.ctxConnectedCancel() - } -} - -// GetConnectionInfo returns the basic connection information used to interact with the server. -func (r *ProtocolLXD) GetConnectionInfo() (*ConnectionInfo, error) { - info := ConnectionInfo{} - info.Certificate = r.httpCertificate - info.Protocol = "lxd" - info.URL = r.httpBaseURL.String() - info.SocketPath = r.httpUnixPath - - info.Project = r.project - if info.Project == "" { - info.Project = "default" - } - - info.Target = r.clusterTarget - if info.Target == "" && r.server != nil { - info.Target = r.server.Environment.ServerName - } - - urls := []string{} - if r.httpProtocol == "https" { - urls = append(urls, r.httpBaseURL.String()) - } - - if r.server != nil && len(r.server.Environment.Addresses) > 0 { - for _, addr := range r.server.Environment.Addresses { - if strings.HasPrefix(addr, ":") { - continue - } - - url := fmt.Sprintf("https://%s", addr) - if !shared.StringInSlice(url, urls) { - urls = append(urls, url) - } - } - } - - info.Addresses = urls - - return &info, nil -} - -func (r *ProtocolLXD) isSameServer(server Server) bool { - // Short path checking if the two structs are identical. - if r == server { - return true - } - - // Short path if either of the structs are nil. - if r == nil || server == nil { - return false - } - - // When dealing with uninitialized servers, we can't safely compare. - if r.server == nil { - return false - } - - // Get the connection info from both servers. - srcInfo, err := r.GetConnectionInfo() - if err != nil { - return false - } - - dstInfo, err := server.GetConnectionInfo() - if err != nil { - return false - } - - // Check whether we're dealing with the same server. - return srcInfo.Protocol == dstInfo.Protocol && srcInfo.Certificate == dstInfo.Certificate && - srcInfo.Project == dstInfo.Project && srcInfo.Target == dstInfo.Target -} - -// GetHTTPClient returns the http client used for the connection. 
This can be used to set custom http options. -func (r *ProtocolLXD) GetHTTPClient() (*http.Client, error) { - if r.http == nil { - return nil, fmt.Errorf("HTTP client isn't set, bad connection") - } - - return r.http, nil -} - -// DoHTTP performs a Request, using macaroon authentication if set. -func (r *ProtocolLXD) DoHTTP(req *http.Request) (*http.Response, error) { - r.addClientHeaders(req) - - // Send the request through - if r.bakeryClient != nil { - return r.bakeryClient.Do(req) - } - - return r.http.Do(req) -} - -// addClientHeaders sets headers from client settings. -// User-Agent (if r.httpUserAgent is set). -// X-LXD-authenticated (if r.requireAuthenticated is set). -// Bakery authentication header and cookie (if r.bakeryClient is set). -func (r *ProtocolLXD) addClientHeaders(req *http.Request) { - if r.httpUserAgent != "" { - req.Header.Set("User-Agent", r.httpUserAgent) - } - - if r.requireAuthenticated { - req.Header.Set("X-LXD-authenticated", "true") - } - - if r.bakeryClient != nil { - req.Header.Set(httpbakery.BakeryProtocolHeader, fmt.Sprint(bakery.LatestVersion)) - - for _, cookie := range r.http.Jar.Cookies(req.URL) { - req.AddCookie(cookie) - } - } -} - -// RequireAuthenticated sets whether we expect to be authenticated with the server. -func (r *ProtocolLXD) RequireAuthenticated(authenticated bool) { - r.requireAuthenticated = authenticated -} - -// RawQuery allows directly querying the LXD API -// -// This should only be used by internal LXD tools. -func (r *ProtocolLXD) RawQuery(method string, path string, data any, ETag string) (*api.Response, string, error) { - // Generate the URL - url := fmt.Sprintf("%s%s", r.httpBaseURL.String(), path) - - return r.rawQuery(method, url, data, ETag) -} - -// RawWebsocket allows directly connection to LXD API websockets -// -// This should only be used by internal LXD tools. 
-func (r *ProtocolLXD) RawWebsocket(path string) (*websocket.Conn, error) { - return r.websocket(path) -} - -// RawOperation allows direct querying of a LXD API endpoint returning -// background operations. -func (r *ProtocolLXD) RawOperation(method string, path string, data any, ETag string) (Operation, string, error) { - return r.queryOperation(method, path, data, ETag) -} - -// Internal functions. -func lxdParseResponse(resp *http.Response) (*api.Response, string, error) { - // Get the ETag - etag := resp.Header.Get("ETag") - - // Decode the response - decoder := json.NewDecoder(resp.Body) - response := api.Response{} - - err := decoder.Decode(&response) - if err != nil { - // Check the return value for a cleaner error - if resp.StatusCode != http.StatusOK { - return nil, "", fmt.Errorf("Failed to fetch %s: %s", resp.Request.URL.String(), resp.Status) - } - - return nil, "", err - } - - // Handle errors - if response.Type == api.ErrorResponse { - return nil, "", api.StatusErrorf(resp.StatusCode, response.Error) - } - - return &response, etag, nil -} - -func (r *ProtocolLXD) rawQuery(method string, url string, data any, ETag string) (*api.Response, string, error) { - var req *http.Request - var err error - - // Log the request - logger.Debug("Sending request to LXD", logger.Ctx{ - "method": method, - "url": url, - "etag": ETag, - }) - - // Get a new HTTP request setup - if data != nil { - switch data := data.(type) { - case io.Reader: - // Some data to be sent along with the request - req, err = http.NewRequestWithContext(r.ctx, method, url, data) - if err != nil { - return nil, "", err - } - - // Set the encoding accordingly - req.Header.Set("Content-Type", "application/octet-stream") - default: - // Encode the provided data - buf := bytes.Buffer{} - err := json.NewEncoder(&buf).Encode(data) - if err != nil { - return nil, "", err - } - - // Some data to be sent along with the request - // Use a reader since the request body needs to be seekable - req, err = 
http.NewRequestWithContext(r.ctx, method, url, bytes.NewReader(buf.Bytes())) - if err != nil { - return nil, "", err - } - - // Set the encoding accordingly - req.Header.Set("Content-Type", "application/json") - - // Log the data - logger.Debugf(logger.Pretty(data)) - } - } else { - // No data to be sent along with the request - req, err = http.NewRequestWithContext(r.ctx, method, url, nil) - if err != nil { - return nil, "", err - } - } - - // Set the ETag - if ETag != "" { - req.Header.Set("If-Match", ETag) - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return nil, "", err - } - - defer func() { _ = resp.Body.Close() }() - - return lxdParseResponse(resp) -} - -// setURLQueryAttributes modifies the supplied URL's query string with the client's current target and project. -func (r *ProtocolLXD) setURLQueryAttributes(apiURL *neturl.URL) { - // Extract query fields and update for cluster targeting or project - values := apiURL.Query() - if r.clusterTarget != "" { - if values.Get("target") == "" { - values.Set("target", r.clusterTarget) - } - } - - if r.project != "" { - if values.Get("project") == "" && values.Get("all-projects") == "" { - values.Set("project", r.project) - } - } - - apiURL.RawQuery = values.Encode() -} - -func (r *ProtocolLXD) setQueryAttributes(uri string) (string, error) { - // Parse the full URI - fields, err := neturl.Parse(uri) - if err != nil { - return "", err - } - - r.setURLQueryAttributes(fields) - - return fields.String(), nil -} - -func (r *ProtocolLXD) query(method string, path string, data any, ETag string) (*api.Response, string, error) { - // Generate the URL - url := fmt.Sprintf("%s/1.0%s", r.httpBaseURL.String(), path) - - // Add project/target - url, err := r.setQueryAttributes(url) - if err != nil { - return nil, "", err - } - - // Run the actual query - return r.rawQuery(method, url, data, ETag) -} - -func (r *ProtocolLXD) queryStruct(method string, path string, data any, ETag string, target any) 
(string, error) { - resp, etag, err := r.query(method, path, data, ETag) - if err != nil { - return "", err - } - - err = resp.MetadataAsStruct(&target) - if err != nil { - return "", err - } - - // Log the data - logger.Debugf("Got response struct from LXD") - logger.Debugf(logger.Pretty(target)) - - return etag, nil -} - -func (r *ProtocolLXD) queryOperation(method string, path string, data any, ETag string) (Operation, string, error) { - // Attempt to setup an early event listener - listener, err := r.GetEvents() - if err != nil { - listener = nil - } - - // Send the query - resp, etag, err := r.query(method, path, data, ETag) - if err != nil { - if listener != nil { - listener.Disconnect() - } - - return nil, "", err - } - - // Get to the operation - respOperation, err := resp.MetadataAsOperation() - if err != nil { - if listener != nil { - listener.Disconnect() - } - - return nil, "", err - } - - // Setup an Operation wrapper - op := operation{ - Operation: *respOperation, - r: r, - listener: listener, - chActive: make(chan bool), - } - - // Log the data - logger.Debugf("Got operation from LXD") - logger.Debugf(logger.Pretty(op.Operation)) - - return &op, etag, nil -} - -func (r *ProtocolLXD) rawWebsocket(url string) (*websocket.Conn, error) { - // Grab the http transport handler - httpTransport, err := r.getUnderlyingHTTPTransport() - if err != nil { - return nil, err - } - - // Setup a new websocket dialer based on it - dialer := websocket.Dialer{ - NetDialContext: httpTransport.DialContext, - TLSClientConfig: httpTransport.TLSClientConfig, - Proxy: httpTransport.Proxy, - HandshakeTimeout: time.Second * 5, - } - - // Create temporary http.Request using the http url, not the ws one, so that we can add the client headers - // for the websocket request. 
- req := &http.Request{URL: &r.httpBaseURL, Header: http.Header{}} - r.addClientHeaders(req) - - // Establish the connection - conn, resp, err := dialer.Dial(url, req.Header) - if err != nil { - if resp != nil { - _, _, err = lxdParseResponse(resp) - } - - return nil, err - } - - // Set TCP timeout options. - remoteTCP, _ := tcp.ExtractConn(conn.UnderlyingConn()) - if remoteTCP != nil { - err = tcp.SetTimeouts(remoteTCP, 0) - if err != nil { - logger.Warn("Failed setting TCP timeouts on remote connection", logger.Ctx{"err": err}) - } - } - - // Log the data - logger.Debugf("Connected to the websocket: %v", url) - - return conn, nil -} - -func (r *ProtocolLXD) websocket(path string) (*websocket.Conn, error) { - // Generate the URL - var url string - if r.httpBaseURL.Scheme == "https" { - url = fmt.Sprintf("wss://%s/1.0%s", r.httpBaseURL.Host, path) - } else { - url = fmt.Sprintf("ws://%s/1.0%s", r.httpBaseURL.Host, path) - } - - return r.rawWebsocket(url) -} - -func (r *ProtocolLXD) setupBakeryClient() { - r.bakeryClient = httpbakery.NewClient() - r.bakeryClient.Client = r.http - if r.bakeryInteractor != nil { - for _, interactor := range r.bakeryInteractor { - r.bakeryClient.AddInteractor(interactor) - } - } -} - -// WithContext returns a client that will add context.Context. -func (r *ProtocolLXD) WithContext(ctx context.Context) InstanceServer { - rr := r - rr.ctx = ctx - return rr -} - -// getUnderlyingHTTPTransport returns the *http.Transport used by the http client. If the http -// client was initialized with a HTTPTransporter, it returns the wrapped *http.Transport. 
-func (r *ProtocolLXD) getUnderlyingHTTPTransport() (*http.Transport, error) { - switch t := r.http.Transport.(type) { - case *http.Transport: - return t, nil - case HTTPTransporter: - return t.Transport(), nil - default: - return nil, fmt.Errorf("Unexpected http.Transport type, %T", r) - } -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_certificates.go b/vendor/github.com/lxc/lxd/client/lxd_certificates.go deleted file mode 100644 index b5206c0f..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_certificates.go +++ /dev/null @@ -1,106 +0,0 @@ -package lxd - -import ( - "fmt" - "net/url" - - "github.com/lxc/lxd/shared/api" -) - -// Certificate handling functions - -// GetCertificateFingerprints returns a list of certificate fingerprints. -func (r *ProtocolLXD) GetCertificateFingerprints() ([]string, error) { - // Fetch the raw URL values. - urls := []string{} - baseURL := "/certificates" - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetCertificates returns a list of certificates. -func (r *ProtocolLXD) GetCertificates() ([]api.Certificate, error) { - certificates := []api.Certificate{} - - // Fetch the raw value - _, err := r.queryStruct("GET", "/certificates?recursion=1", nil, "", &certificates) - if err != nil { - return nil, err - } - - return certificates, nil -} - -// GetCertificate returns the certificate entry for the provided fingerprint. -func (r *ProtocolLXD) GetCertificate(fingerprint string) (*api.Certificate, string, error) { - certificate := api.Certificate{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("/certificates/%s", url.PathEscape(fingerprint)), nil, "", &certificate) - if err != nil { - return nil, "", err - } - - return &certificate, etag, nil -} - -// CreateCertificate adds a new certificate to the LXD trust store. 
-func (r *ProtocolLXD) CreateCertificate(certificate api.CertificatesPost) error { - // Send the request - _, _, err := r.query("POST", "/certificates", certificate, "") - if err != nil { - return err - } - - return nil -} - -// UpdateCertificate updates the certificate definition. -func (r *ProtocolLXD) UpdateCertificate(fingerprint string, certificate api.CertificatePut, ETag string) error { - if !r.HasExtension("certificate_update") { - return fmt.Errorf("The server is missing the required \"certificate_update\" API extension") - } - - // Send the request - _, _, err := r.query("PUT", fmt.Sprintf("/certificates/%s", url.PathEscape(fingerprint)), certificate, ETag) - if err != nil { - return err - } - - return nil -} - -// DeleteCertificate removes a certificate from the LXD trust store. -func (r *ProtocolLXD) DeleteCertificate(fingerprint string) error { - // Send the request - _, _, err := r.query("DELETE", fmt.Sprintf("/certificates/%s", url.PathEscape(fingerprint)), nil, "") - if err != nil { - return err - } - - return nil -} - -// CreateCertificateToken requests a certificate add token. 
-func (r *ProtocolLXD) CreateCertificateToken(certificate api.CertificatesPost) (Operation, error) { - if !r.HasExtension("certificate_token") { - return nil, fmt.Errorf("The server is missing the required \"certificate_token\" API extension") - } - - if !certificate.Token { - return nil, fmt.Errorf("Token needs to be true if requesting a token") - } - - // Send the request - op, _, err := r.queryOperation("POST", "/certificates", certificate, "") - if err != nil { - return nil, err - } - - return op, nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_cluster.go b/vendor/github.com/lxc/lxd/client/lxd_cluster.go deleted file mode 100644 index ba1c7876..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_cluster.go +++ /dev/null @@ -1,311 +0,0 @@ -package lxd - -import ( - "fmt" - - "github.com/lxc/lxd/shared/api" -) - -// GetCluster returns information about a cluster -// -// If this client is not trusted, the password must be supplied. -func (r *ProtocolLXD) GetCluster() (*api.Cluster, string, error) { - if !r.HasExtension("clustering") { - return nil, "", fmt.Errorf("The server is missing the required \"clustering\" API extension") - } - - cluster := &api.Cluster{} - etag, err := r.queryStruct("GET", "/cluster", nil, "", &cluster) - if err != nil { - return nil, "", err - } - - return cluster, etag, nil -} - -// UpdateCluster requests to bootstrap a new cluster or join an existing one. 
-func (r *ProtocolLXD) UpdateCluster(cluster api.ClusterPut, ETag string) (Operation, error) { - if !r.HasExtension("clustering") { - return nil, fmt.Errorf("The server is missing the required \"clustering\" API extension") - } - - if cluster.ServerAddress != "" || cluster.ClusterPassword != "" || len(cluster.MemberConfig) > 0 { - if !r.HasExtension("clustering_join") { - return nil, fmt.Errorf("The server is missing the required \"clustering_join\" API extension") - } - } - - op, _, err := r.queryOperation("PUT", "/cluster", cluster, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// DeleteClusterMember makes the given member leave the cluster (gracefully or not, -// depending on the force flag). -func (r *ProtocolLXD) DeleteClusterMember(name string, force bool) error { - if !r.HasExtension("clustering") { - return fmt.Errorf("The server is missing the required \"clustering\" API extension") - } - - params := "" - if force { - params += "?force=1" - } - - _, _, err := r.query("DELETE", fmt.Sprintf("/cluster/members/%s%s", name, params), nil, "") - if err != nil { - return err - } - - return nil -} - -// GetClusterMemberNames returns the URLs of the current members in the cluster. -func (r *ProtocolLXD) GetClusterMemberNames() ([]string, error) { - if !r.HasExtension("clustering") { - return nil, fmt.Errorf("The server is missing the required \"clustering\" API extension") - } - - // Fetch the raw URL values. - urls := []string{} - baseURL := "/cluster/members" - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetClusterMembers returns the current members of the cluster. 
-func (r *ProtocolLXD) GetClusterMembers() ([]api.ClusterMember, error) { - if !r.HasExtension("clustering") { - return nil, fmt.Errorf("The server is missing the required \"clustering\" API extension") - } - - members := []api.ClusterMember{} - _, err := r.queryStruct("GET", "/cluster/members?recursion=1", nil, "", &members) - if err != nil { - return nil, err - } - - return members, nil -} - -// GetClusterMember returns information about the given member. -func (r *ProtocolLXD) GetClusterMember(name string) (*api.ClusterMember, string, error) { - if !r.HasExtension("clustering") { - return nil, "", fmt.Errorf("The server is missing the required \"clustering\" API extension") - } - - member := api.ClusterMember{} - etag, err := r.queryStruct("GET", fmt.Sprintf("/cluster/members/%s", name), nil, "", &member) - if err != nil { - return nil, "", err - } - - return &member, etag, nil -} - -// UpdateClusterMember updates information about the given member. -func (r *ProtocolLXD) UpdateClusterMember(name string, member api.ClusterMemberPut, ETag string) error { - if !r.HasExtension("clustering_edit_roles") { - return fmt.Errorf("The server is missing the required \"clustering_edit_roles\" API extension") - } - - if member.FailureDomain != "" { - if !r.HasExtension("clustering_failure_domains") { - return fmt.Errorf("The server is missing the required \"clustering_failure_domains\" API extension") - } - } - - // Send the request - _, _, err := r.query("PUT", fmt.Sprintf("/cluster/members/%s", name), member, ETag) - if err != nil { - return err - } - - return nil -} - -// RenameClusterMember changes the name of an existing member. 
-func (r *ProtocolLXD) RenameClusterMember(name string, member api.ClusterMemberPost) error { - if !r.HasExtension("clustering") { - return fmt.Errorf("The server is missing the required \"clustering\" API extension") - } - - _, _, err := r.query("POST", fmt.Sprintf("/cluster/members/%s", name), member, "") - if err != nil { - return err - } - - return nil -} - -// CreateClusterMember generates a join token to add a cluster member. -func (r *ProtocolLXD) CreateClusterMember(member api.ClusterMembersPost) (Operation, error) { - if !r.HasExtension("clustering_join_token") { - return nil, fmt.Errorf("The server is missing the required \"clustering_join_token\" API extension") - } - - op, _, err := r.queryOperation("POST", "/cluster/members", member, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// UpdateClusterCertificate updates the cluster certificate for every node in the cluster. -func (r *ProtocolLXD) UpdateClusterCertificate(certs api.ClusterCertificatePut, ETag string) error { - if !r.HasExtension("clustering_update_cert") { - return fmt.Errorf("The server is missing the required \"clustering_update_cert\" API extension") - } - - _, _, err := r.query("PUT", "/cluster/certificate", certs, ETag) - if err != nil { - return err - } - - return nil -} - -// GetClusterMemberState gets state information about a cluster member. -func (r *ProtocolLXD) GetClusterMemberState(name string) (*api.ClusterMemberState, string, error) { - err := r.CheckExtension("cluster_member_state") - if err != nil { - return nil, "", err - } - - state := api.ClusterMemberState{} - u := api.NewURL().Path("cluster", "members", name, "state") - etag, err := r.queryStruct("GET", u.String(), nil, "", &state) - if err != nil { - return nil, "", err - } - - return &state, etag, err -} - -// UpdateClusterMemberState evacuates or restores a cluster member. 
-func (r *ProtocolLXD) UpdateClusterMemberState(name string, state api.ClusterMemberStatePost) (Operation, error) { - if !r.HasExtension("clustering_evacuation") { - return nil, fmt.Errorf("The server is missing the required \"clustering_evacuation\" API extension") - } - - op, _, err := r.queryOperation("POST", fmt.Sprintf("/cluster/members/%s/state", name), state, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// GetClusterGroups returns the cluster groups. -func (r *ProtocolLXD) GetClusterGroups() ([]api.ClusterGroup, error) { - if !r.HasExtension("clustering_groups") { - return nil, fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") - } - - groups := []api.ClusterGroup{} - - _, err := r.queryStruct("GET", "/cluster/groups?recursion=1", nil, "", &groups) - if err != nil { - return nil, err - } - - return groups, nil -} - -// GetClusterGroupNames returns the cluster group names. -func (r *ProtocolLXD) GetClusterGroupNames() ([]string, error) { - if !r.HasExtension("clustering_groups") { - return nil, fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") - } - - urls := []string{} - - _, err := r.queryStruct("GET", "/cluster/groups", nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames("/1.0/cluster/groups", urls...) -} - -// RenameClusterGroup changes the name of an existing cluster group. -func (r *ProtocolLXD) RenameClusterGroup(name string, group api.ClusterGroupPost) error { - if !r.HasExtension("clustering_groups") { - return fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") - } - - _, _, err := r.query("POST", fmt.Sprintf("/cluster/groups/%s", name), group, "") - if err != nil { - return err - } - - return nil -} - -// CreateClusterGroup creates a new cluster group. 
-func (r *ProtocolLXD) CreateClusterGroup(group api.ClusterGroupsPost) error { - if !r.HasExtension("clustering_groups") { - return fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") - } - - _, _, err := r.query("POST", "/cluster/groups", group, "") - if err != nil { - return err - } - - return nil -} - -// DeleteClusterGroup deletes an existing cluster group. -func (r *ProtocolLXD) DeleteClusterGroup(name string) error { - if !r.HasExtension("clustering_groups") { - return fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") - } - - _, _, err := r.query("DELETE", fmt.Sprintf("/cluster/groups/%s", name), nil, "") - if err != nil { - return err - } - - return nil -} - -// UpdateClusterGroup updates information about the given cluster group. -func (r *ProtocolLXD) UpdateClusterGroup(name string, group api.ClusterGroupPut, ETag string) error { - if !r.HasExtension("clustering_groups") { - return fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") - } - - // Send the request - _, _, err := r.query("PUT", fmt.Sprintf("/cluster/groups/%s", name), group, ETag) - if err != nil { - return err - } - - return nil -} - -// GetClusterGroup returns information about the given cluster group. 
-func (r *ProtocolLXD) GetClusterGroup(name string) (*api.ClusterGroup, string, error) { - if !r.HasExtension("clustering_groups") { - return nil, "", fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") - } - - group := api.ClusterGroup{} - etag, err := r.queryStruct("GET", fmt.Sprintf("/cluster/groups/%s", name), nil, "", &group) - if err != nil { - return nil, "", err - } - - return &group, etag, nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_containers.go b/vendor/github.com/lxc/lxd/client/lxd_containers.go deleted file mode 100644 index b8d960ee..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_containers.go +++ /dev/null @@ -1,1799 +0,0 @@ -package lxd - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strings" - - "github.com/gorilla/websocket" - - "github.com/lxc/lxd/shared" - "github.com/lxc/lxd/shared/api" - "github.com/lxc/lxd/shared/cancel" - "github.com/lxc/lxd/shared/ioprogress" - "github.com/lxc/lxd/shared/units" -) - -// Container handling functions -// -// Deprecated: Those functions are deprecated and won't be updated anymore. -// Please use the equivalent Instance function instead. - -// GetContainerNames returns a list of container names. -func (r *ProtocolLXD) GetContainerNames() ([]string, error) { - // Fetch the raw URL values. - urls := []string{} - baseURL := "/containers" - _, err := r.queryStruct("GET", "/containers", nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetContainers returns a list of containers. -func (r *ProtocolLXD) GetContainers() ([]api.Container, error) { - containers := []api.Container{} - - // Fetch the raw value - _, err := r.queryStruct("GET", "/containers?recursion=1", nil, "", &containers) - if err != nil { - return nil, err - } - - return containers, nil -} - -// GetContainersFull returns a list of containers including snapshots, backups and state. 
-func (r *ProtocolLXD) GetContainersFull() ([]api.ContainerFull, error) { - containers := []api.ContainerFull{} - - if !r.HasExtension("container_full") { - return nil, fmt.Errorf("The server is missing the required \"container_full\" API extension") - } - - // Fetch the raw value - _, err := r.queryStruct("GET", "/containers?recursion=2", nil, "", &containers) - if err != nil { - return nil, err - } - - return containers, nil -} - -// GetContainer returns the container entry for the provided name. -func (r *ProtocolLXD) GetContainer(name string) (*api.Container, string, error) { - container := api.Container{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("/containers/%s", url.PathEscape(name)), nil, "", &container) - if err != nil { - return nil, "", err - } - - return &container, etag, nil -} - -// CreateContainerFromBackup is a convenience function to make it easier to -// create a container from a backup. -func (r *ProtocolLXD) CreateContainerFromBackup(args ContainerBackupArgs) (Operation, error) { - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - if args.PoolName == "" { - // Send the request - op, _, err := r.queryOperation("POST", "/containers", args.BackupFile, "") - if err != nil { - return nil, err - } - - return op, nil - } - - if !r.HasExtension("container_backup_override_pool") { - return nil, fmt.Errorf("The server is missing the required \"container_backup_override_pool\" API extension") - } - - // Prepare the HTTP request - reqURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0/containers", r.httpBaseURL.String())) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", reqURL, args.BackupFile) - if err != nil { - return nil, err - } - - req.Header.Set("Content-Type", "application/octet-stream") - req.Header.Set("X-LXD-pool", args.PoolName) - - // Send the request - resp, err := r.DoHTTP(req) - if 
err != nil { - return nil, err - } - - defer func() { _ = resp.Body.Close() }() - - // Handle errors - response, _, err := lxdParseResponse(resp) - if err != nil { - return nil, err - } - - // Get to the operation - respOperation, err := response.MetadataAsOperation() - if err != nil { - return nil, err - } - - // Setup an Operation wrapper - op := operation{ - Operation: *respOperation, - r: r, - chActive: make(chan bool), - } - - return &op, nil -} - -// CreateContainer requests that LXD creates a new container. -func (r *ProtocolLXD) CreateContainer(container api.ContainersPost) (Operation, error) { - if container.Source.ContainerOnly { - if !r.HasExtension("container_only_migration") { - return nil, fmt.Errorf("The server is missing the required \"container_only_migration\" API extension") - } - } - - // Send the request - op, _, err := r.queryOperation("POST", "/containers", container, "") - if err != nil { - return nil, err - } - - return op, nil -} - -func (r *ProtocolLXD) tryCreateContainer(req api.ContainersPost, urls []string) (RemoteOperation, error) { - if len(urls) == 0 { - return nil, fmt.Errorf("The source server isn't listening on the network") - } - - rop := remoteOperation{ - chDone: make(chan bool), - } - - operation := req.Source.Operation - - // Forward targetOp to remote op - go func() { - success := false - var errors []remoteOperationResult - for _, serverURL := range urls { - if operation == "" { - req.Source.Server = serverURL - } else { - req.Source.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) - } - - op, err := r.CreateContainer(req) - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - continue - } - - rop.targetOp = op - - for _, handler := range rop.handlers { - _, _ = rop.targetOp.AddHandler(handler) - } - - err = rop.targetOp.Wait() - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - - if 
shared.IsConnectionError(err) { - continue - } - - break - } - - success = true - break - } - - if !success { - rop.err = remoteOperationError("Failed container creation", errors) - } - - close(rop.chDone) - }() - - return &rop, nil -} - -// CreateContainerFromImage is a convenience function to make it easier to create a container from an existing image. -func (r *ProtocolLXD) CreateContainerFromImage(source ImageServer, image api.Image, req api.ContainersPost) (RemoteOperation, error) { - // Set the minimal source fields - req.Source.Type = "image" - - // Optimization for the local image case - if r.isSameServer(source) { - // Always use fingerprints for local case - req.Source.Fingerprint = image.Fingerprint - req.Source.Alias = "" - - op, err := r.CreateContainer(req) - if err != nil { - return nil, err - } - - rop := remoteOperation{ - targetOp: op, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - rop.err = rop.targetOp.Wait() - close(rop.chDone) - }() - - return &rop, nil - } - - // Minimal source fields for remote image - req.Source.Mode = "pull" - - // If we have an alias and the image is public, use that - if req.Source.Alias != "" && image.Public { - req.Source.Fingerprint = "" - } else { - req.Source.Fingerprint = image.Fingerprint - req.Source.Alias = "" - } - - // Get source server connection information - info, err := source.GetConnectionInfo() - if err != nil { - return nil, err - } - - req.Source.Protocol = info.Protocol - req.Source.Certificate = info.Certificate - - // Generate secret token if needed - if !image.Public { - secret, err := source.GetImageSecret(image.Fingerprint) - if err != nil { - return nil, err - } - - req.Source.Secret = secret - } - - return r.tryCreateContainer(req, info.Addresses) -} - -// CopyContainer copies a container from a remote server. Additional options can be passed using ContainerCopyArgs. 
-func (r *ProtocolLXD) CopyContainer(source InstanceServer, container api.Container, args *ContainerCopyArgs) (RemoteOperation, error) { - // Base request - req := api.ContainersPost{ - Name: container.Name, - ContainerPut: container.Writable(), - } - - req.Source.BaseImage = container.Config["volatile.base_image"] - - // Process the copy arguments - if args != nil { - // Quick checks. - if args.ContainerOnly { - if !r.HasExtension("container_only_migration") { - return nil, fmt.Errorf("The target server is missing the required \"container_only_migration\" API extension") - } - - if !source.HasExtension("container_only_migration") { - return nil, fmt.Errorf("The source server is missing the required \"container_only_migration\" API extension") - } - } - - if shared.StringInSlice(args.Mode, []string{"push", "relay"}) { - if !r.HasExtension("container_push") { - return nil, fmt.Errorf("The target server is missing the required \"container_push\" API extension") - } - - if !source.HasExtension("container_push") { - return nil, fmt.Errorf("The source server is missing the required \"container_push\" API extension") - } - } - - if args.Mode == "push" && !source.HasExtension("container_push_target") { - return nil, fmt.Errorf("The source server is missing the required \"container_push_target\" API extension") - } - - if args.Refresh { - if !r.HasExtension("container_incremental_copy") { - return nil, fmt.Errorf("The target server is missing the required \"container_incremental_copy\" API extension") - } - - if !source.HasExtension("container_incremental_copy") { - return nil, fmt.Errorf("The source server is missing the required \"container_incremental_copy\" API extension") - } - } - - // Allow overriding the target name - if args.Name != "" { - req.Name = args.Name - } - - req.Source.Live = args.Live - req.Source.ContainerOnly = args.ContainerOnly - req.Source.Refresh = args.Refresh - } - - if req.Source.Live { - req.Source.Live = container.StatusCode == api.Running - 
} - - sourceInfo, err := source.GetConnectionInfo() - if err != nil { - return nil, fmt.Errorf("Failed to get source connection info: %w", err) - } - - destInfo, err := r.GetConnectionInfo() - if err != nil { - return nil, fmt.Errorf("Failed to get destination connection info: %w", err) - } - - // Optimization for the local copy case - if destInfo.URL == sourceInfo.URL && destInfo.SocketPath == sourceInfo.SocketPath && (!r.IsClustered() || container.Location == r.clusterTarget || r.HasExtension("cluster_internal_copy")) { - // Project handling - if destInfo.Project != sourceInfo.Project { - if !r.HasExtension("container_copy_project") { - return nil, fmt.Errorf("The server is missing the required \"container_copy_project\" API extension") - } - - req.Source.Project = sourceInfo.Project - } - - // Local copy source fields - req.Source.Type = "copy" - req.Source.Source = container.Name - - // Copy the container - op, err := r.CreateContainer(req) - if err != nil { - return nil, err - } - - rop := remoteOperation{ - targetOp: op, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - rop.err = rop.targetOp.Wait() - close(rop.chDone) - }() - - return &rop, nil - } - - // Source request - sourceReq := api.ContainerPost{ - Migration: true, - Live: req.Source.Live, - ContainerOnly: req.Source.ContainerOnly, - } - - // Push mode migration - if args != nil && args.Mode == "push" { - // Get target server connection information - info, err := r.GetConnectionInfo() - if err != nil { - return nil, err - } - - // Create the container - req.Source.Type = "migration" - req.Source.Mode = "push" - req.Source.Refresh = args.Refresh - - op, err := r.CreateContainer(req) - if err != nil { - return nil, err - } - - opAPI := op.Get() - - targetSecrets := map[string]string{} - for k, v := range opAPI.Metadata { - targetSecrets[k] = v.(string) - } - - // Prepare the source request - target := api.ContainerPostTarget{} - target.Operation = opAPI.ID - 
target.Websockets = targetSecrets - target.Certificate = info.Certificate - sourceReq.Target = &target - - return r.tryMigrateContainer(source, container.Name, sourceReq, info.Addresses) - } - - // Get source server connection information - info, err := source.GetConnectionInfo() - if err != nil { - return nil, err - } - - op, err := source.MigrateContainer(container.Name, sourceReq) - if err != nil { - return nil, err - } - - opAPI := op.Get() - - sourceSecrets := map[string]string{} - for k, v := range opAPI.Metadata { - sourceSecrets[k] = v.(string) - } - - // Relay mode migration - if args != nil && args.Mode == "relay" { - // Push copy source fields - req.Source.Type = "migration" - req.Source.Mode = "push" - - // Start the process - targetOp, err := r.CreateContainer(req) - if err != nil { - return nil, err - } - - targetOpAPI := targetOp.Get() - - // Extract the websockets - targetSecrets := map[string]string{} - for k, v := range targetOpAPI.Metadata { - targetSecrets[k] = v.(string) - } - - // Launch the relay - err = r.proxyMigration(targetOp.(*operation), targetSecrets, source, op.(*operation), sourceSecrets) - if err != nil { - return nil, err - } - - // Prepare a tracking operation - rop := remoteOperation{ - targetOp: targetOp, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - rop.err = rop.targetOp.Wait() - close(rop.chDone) - }() - - return &rop, nil - } - - // Pull mode migration - req.Source.Type = "migration" - req.Source.Mode = "pull" - req.Source.Operation = opAPI.ID - req.Source.Websockets = sourceSecrets - req.Source.Certificate = info.Certificate - - return r.tryCreateContainer(req, info.Addresses) -} - -// UpdateContainer updates the container definition. 
-func (r *ProtocolLXD) UpdateContainer(name string, container api.ContainerPut, ETag string) (Operation, error) { - // Send the request - op, _, err := r.queryOperation("PUT", fmt.Sprintf("/containers/%s", url.PathEscape(name)), container, ETag) - if err != nil { - return nil, err - } - - return op, nil -} - -// RenameContainer requests that LXD renames the container. -func (r *ProtocolLXD) RenameContainer(name string, container api.ContainerPost) (Operation, error) { - // Quick check. - if container.Migration { - return nil, fmt.Errorf("Can't ask for a migration through RenameContainer") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/containers/%s", url.PathEscape(name)), container, "") - if err != nil { - return nil, err - } - - return op, nil -} - -func (r *ProtocolLXD) tryMigrateContainer(source InstanceServer, name string, req api.ContainerPost, urls []string) (RemoteOperation, error) { - if len(urls) == 0 { - return nil, fmt.Errorf("The target server isn't listening on the network") - } - - rop := remoteOperation{ - chDone: make(chan bool), - } - - operation := req.Target.Operation - - // Forward targetOp to remote op - go func() { - success := false - var errors []remoteOperationResult - for _, serverURL := range urls { - req.Target.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) - - op, err := source.MigrateContainer(name, req) - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - continue - } - - rop.targetOp = op - - for _, handler := range rop.handlers { - _, _ = rop.targetOp.AddHandler(handler) - } - - err = rop.targetOp.Wait() - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - - if shared.IsConnectionError(err) { - continue - } - - break - } - - success = true - break - } - - if !success { - rop.err = remoteOperationError("Failed container migration", errors) - } - - close(rop.chDone) - }() - 
- return &rop, nil -} - -// MigrateContainer requests that LXD prepares for a container migration. -func (r *ProtocolLXD) MigrateContainer(name string, container api.ContainerPost) (Operation, error) { - if container.ContainerOnly { - if !r.HasExtension("container_only_migration") { - return nil, fmt.Errorf("The server is missing the required \"container_only_migration\" API extension") - } - } - - // Quick check. - if !container.Migration { - return nil, fmt.Errorf("Can't ask for a rename through MigrateContainer") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/containers/%s", url.PathEscape(name)), container, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// DeleteContainer requests that LXD deletes the container. -func (r *ProtocolLXD) DeleteContainer(name string) (Operation, error) { - // Send the request - op, _, err := r.queryOperation("DELETE", fmt.Sprintf("/containers/%s", url.PathEscape(name)), nil, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// ExecContainer requests that LXD spawns a command inside the container. 
-func (r *ProtocolLXD) ExecContainer(containerName string, exec api.ContainerExecPost, args *ContainerExecArgs) (Operation, error) { - if exec.RecordOutput { - if !r.HasExtension("container_exec_recording") { - return nil, fmt.Errorf("The server is missing the required \"container_exec_recording\" API extension") - } - } - - if exec.User > 0 || exec.Group > 0 || exec.Cwd != "" { - if !r.HasExtension("container_exec_user_group_cwd") { - return nil, fmt.Errorf("The server is missing the required \"container_exec_user_group_cwd\" API extension") - } - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/containers/%s/exec", url.PathEscape(containerName)), exec, "") - if err != nil { - return nil, err - } - - opAPI := op.Get() - - // Process additional arguments - if args != nil { - // Parse the fds - fds := map[string]string{} - - value, ok := opAPI.Metadata["fds"] - if ok { - values := value.(map[string]any) - for k, v := range values { - fds[k] = v.(string) - } - } - - // Call the control handler with a connection to the control socket - if args.Control != nil && fds[api.SecretNameControl] != "" { - conn, err := r.GetOperationWebsocket(opAPI.ID, fds[api.SecretNameControl]) - if err != nil { - return nil, err - } - - go args.Control(conn) - } - - if exec.Interactive { - // Handle interactive sections - if args.Stdin != nil && args.Stdout != nil { - // Connect to the websocket - conn, err := r.GetOperationWebsocket(opAPI.ID, fds["0"]) - if err != nil { - return nil, err - } - - // And attach stdin and stdout to it - go func() { - shared.WebsocketSendStream(conn, args.Stdin, -1) - <-shared.WebsocketRecvStream(args.Stdout, conn) - _ = conn.Close() - - if args.DataDone != nil { - close(args.DataDone) - } - }() - } else { - if args.DataDone != nil { - close(args.DataDone) - } - } - } else { - // Handle non-interactive sessions - dones := map[int]chan bool{} - conns := []*websocket.Conn{} - - // Handle stdin - if fds["0"] != "" { - conn, err := 
r.GetOperationWebsocket(opAPI.ID, fds["0"]) - if err != nil { - return nil, err - } - - conns = append(conns, conn) - dones[0] = shared.WebsocketSendStream(conn, args.Stdin, -1) - } - - // Handle stdout - if fds["1"] != "" { - conn, err := r.GetOperationWebsocket(opAPI.ID, fds["1"]) - if err != nil { - return nil, err - } - - conns = append(conns, conn) - dones[1] = shared.WebsocketRecvStream(args.Stdout, conn) - } - - // Handle stderr - if fds["2"] != "" { - conn, err := r.GetOperationWebsocket(opAPI.ID, fds["2"]) - if err != nil { - return nil, err - } - - conns = append(conns, conn) - dones[2] = shared.WebsocketRecvStream(args.Stderr, conn) - } - - // Wait for everything to be done - go func() { - for i, chDone := range dones { - // Skip stdin, dealing with it separately below - if i == 0 { - continue - } - - <-chDone - } - - if fds["0"] != "" { - if args.Stdin != nil { - _ = args.Stdin.Close() - } - - // Empty the stdin channel but don't block on it as - // stdin may be stuck in Read() - go func() { - <-dones[0] - }() - } - - for _, conn := range conns { - _ = conn.Close() - } - - if args.DataDone != nil { - close(args.DataDone) - } - }() - } - } - - return op, nil -} - -// GetContainerFile retrieves the provided path from the container. 
-func (r *ProtocolLXD) GetContainerFile(containerName string, path string) (io.ReadCloser, *ContainerFileResponse, error) { - // Prepare the HTTP request - requestURL, err := shared.URLEncode( - fmt.Sprintf("%s/1.0/containers/%s/files", r.httpBaseURL.String(), url.PathEscape(containerName)), - map[string]string{"path": path}) - if err != nil { - return nil, nil, err - } - - requestURL, err = r.setQueryAttributes(requestURL) - if err != nil { - return nil, nil, err - } - - req, err := http.NewRequest("GET", requestURL, nil) - if err != nil { - return nil, nil, err - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return nil, nil, err - } - - // Check the return value for a cleaner error - if resp.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(resp) - if err != nil { - return nil, nil, err - } - } - - // Parse the headers - uid, gid, mode, fileType, _ := shared.ParseLXDFileHeaders(resp.Header) - fileResp := ContainerFileResponse{ - UID: uid, - GID: gid, - Mode: mode, - Type: fileType, - } - - if fileResp.Type == "directory" { - // Decode the response - response := api.Response{} - decoder := json.NewDecoder(resp.Body) - - err = decoder.Decode(&response) - if err != nil { - return nil, nil, err - } - - // Get the file list - entries := []string{} - err = response.MetadataAsStruct(&entries) - if err != nil { - return nil, nil, err - } - - fileResp.Entries = entries - - return nil, &fileResp, err - } - - return resp.Body, &fileResp, err -} - -// CreateContainerFile tells LXD to create a file in the container. 
-func (r *ProtocolLXD) CreateContainerFile(containerName string, path string, args ContainerFileArgs) error { - if args.Type == "directory" { - if !r.HasExtension("directory_manipulation") { - return fmt.Errorf("The server is missing the required \"directory_manipulation\" API extension") - } - } - - if args.Type == "symlink" { - if !r.HasExtension("file_symlinks") { - return fmt.Errorf("The server is missing the required \"file_symlinks\" API extension") - } - } - - if args.WriteMode == "append" { - if !r.HasExtension("file_append") { - return fmt.Errorf("The server is missing the required \"file_append\" API extension") - } - } - - // Prepare the HTTP request - requestURL := fmt.Sprintf("%s/1.0/containers/%s/files?path=%s", r.httpBaseURL.String(), url.PathEscape(containerName), url.QueryEscape(path)) - - requestURL, err := r.setQueryAttributes(requestURL) - if err != nil { - return err - } - - req, err := http.NewRequest("POST", requestURL, args.Content) - if err != nil { - return err - } - - // Set the various headers - if args.UID > -1 { - req.Header.Set("X-LXD-uid", fmt.Sprintf("%d", args.UID)) - } - - if args.GID > -1 { - req.Header.Set("X-LXD-gid", fmt.Sprintf("%d", args.GID)) - } - - if args.Mode > -1 { - req.Header.Set("X-LXD-mode", fmt.Sprintf("%04o", args.Mode)) - } - - if args.Type != "" { - req.Header.Set("X-LXD-type", args.Type) - } - - if args.WriteMode != "" { - req.Header.Set("X-LXD-write", args.WriteMode) - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return err - } - - // Check the return value for a cleaner error - _, _, err = lxdParseResponse(resp) - if err != nil { - return err - } - - return nil -} - -// DeleteContainerFile deletes a file in the container. 
-func (r *ProtocolLXD) DeleteContainerFile(containerName string, path string) error { - if !r.HasExtension("file_delete") { - return fmt.Errorf("The server is missing the required \"file_delete\" API extension") - } - - // Send the request - _, _, err := r.query("DELETE", fmt.Sprintf("/containers/%s/files?path=%s", url.PathEscape(containerName), url.QueryEscape(path)), nil, "") - if err != nil { - return err - } - - return nil -} - -// GetContainerSnapshotNames returns a list of snapshot names for the container. -func (r *ProtocolLXD) GetContainerSnapshotNames(containerName string) ([]string, error) { - // Fetch the raw URL values. - urls := []string{} - baseURL := fmt.Sprintf("/containers/%s/snapshots", url.PathEscape(containerName)) - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetContainerSnapshots returns a list of snapshots for the container. -func (r *ProtocolLXD) GetContainerSnapshots(containerName string) ([]api.ContainerSnapshot, error) { - snapshots := []api.ContainerSnapshot{} - - // Fetch the raw value - _, err := r.queryStruct("GET", fmt.Sprintf("/containers/%s/snapshots?recursion=1", url.PathEscape(containerName)), nil, "", &snapshots) - if err != nil { - return nil, err - } - - return snapshots, nil -} - -// GetContainerSnapshot returns a Snapshot struct for the provided container and snapshot names. -func (r *ProtocolLXD) GetContainerSnapshot(containerName string, name string) (*api.ContainerSnapshot, string, error) { - snapshot := api.ContainerSnapshot{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("/containers/%s/snapshots/%s", url.PathEscape(containerName), url.PathEscape(name)), nil, "", &snapshot) - if err != nil { - return nil, "", err - } - - return &snapshot, etag, nil -} - -// CreateContainerSnapshot requests that LXD creates a new snapshot for the container. 
-func (r *ProtocolLXD) CreateContainerSnapshot(containerName string, snapshot api.ContainerSnapshotsPost) (Operation, error) { - // Validate the request - if snapshot.ExpiresAt != nil && !r.HasExtension("snapshot_expiry_creation") { - return nil, fmt.Errorf("The server is missing the required \"snapshot_expiry_creation\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/containers/%s/snapshots", url.PathEscape(containerName)), snapshot, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// CopyContainerSnapshot copies a snapshot from a remote server into a new container. Additional options can be passed using ContainerCopyArgs. -func (r *ProtocolLXD) CopyContainerSnapshot(source InstanceServer, containerName string, snapshot api.ContainerSnapshot, args *ContainerSnapshotCopyArgs) (RemoteOperation, error) { - // Backward compatibility (with broken Name field) - fields := strings.Split(snapshot.Name, shared.SnapshotDelimiter) - cName := containerName - sName := fields[len(fields)-1] - - // Base request - req := api.ContainersPost{ - Name: cName, - ContainerPut: api.ContainerPut{ - Architecture: snapshot.Architecture, - Config: snapshot.Config, - Devices: snapshot.Devices, - Ephemeral: snapshot.Ephemeral, - Profiles: snapshot.Profiles, - }, - } - - if snapshot.Stateful && args.Live { - if !r.HasExtension("container_snapshot_stateful_migration") { - return nil, fmt.Errorf("The server is missing the required \"container_snapshot_stateful_migration\" API extension") - } - - req.ContainerPut.Stateful = snapshot.Stateful - req.Source.Live = false // Snapshots are never running and so we don't need live migration. - } - - req.Source.BaseImage = snapshot.Config["volatile.base_image"] - - // Process the copy arguments - if args != nil { - // Quick checks. 
- if shared.StringInSlice(args.Mode, []string{"push", "relay"}) { - if !r.HasExtension("container_push") { - return nil, fmt.Errorf("The target server is missing the required \"container_push\" API extension") - } - - if !source.HasExtension("container_push") { - return nil, fmt.Errorf("The source server is missing the required \"container_push\" API extension") - } - } - - if args.Mode == "push" && !source.HasExtension("container_push_target") { - return nil, fmt.Errorf("The source server is missing the required \"container_push_target\" API extension") - } - - // Allow overriding the target name - if args.Name != "" { - req.Name = args.Name - } - } - - sourceInfo, err := source.GetConnectionInfo() - if err != nil { - return nil, fmt.Errorf("Failed to get source connection info: %w", err) - } - - destInfo, err := r.GetConnectionInfo() - if err != nil { - return nil, fmt.Errorf("Failed to get destination connection info: %w", err) - } - - container, _, err := source.GetContainer(cName) - if err != nil { - return nil, fmt.Errorf("Failed to get container info: %w", err) - } - - // Optimization for the local copy case - if destInfo.URL == sourceInfo.URL && destInfo.SocketPath == sourceInfo.SocketPath && (!r.IsClustered() || container.Location == r.clusterTarget || r.HasExtension("cluster_internal_copy")) { - // Project handling - if destInfo.Project != sourceInfo.Project { - if !r.HasExtension("container_copy_project") { - return nil, fmt.Errorf("The server is missing the required \"container_copy_project\" API extension") - } - - req.Source.Project = sourceInfo.Project - } - - // Local copy source fields - req.Source.Type = "copy" - req.Source.Source = fmt.Sprintf("%s/%s", cName, sName) - - // Copy the container - op, err := r.CreateContainer(req) - if err != nil { - return nil, err - } - - rop := remoteOperation{ - targetOp: op, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - rop.err = rop.targetOp.Wait() - close(rop.chDone) - }() 
- - return &rop, nil - } - - // Source request - sourceReq := api.ContainerSnapshotPost{ - Migration: true, - Name: args.Name, - } - - if snapshot.Stateful && args.Live { - sourceReq.Live = args.Live - } - - // Push mode migration - if args != nil && args.Mode == "push" { - // Get target server connection information - info, err := r.GetConnectionInfo() - if err != nil { - return nil, err - } - - // Create the container - req.Source.Type = "migration" - req.Source.Mode = "push" - - op, err := r.CreateContainer(req) - if err != nil { - return nil, err - } - - opAPI := op.Get() - - targetSecrets := map[string]string{} - for k, v := range opAPI.Metadata { - targetSecrets[k] = v.(string) - } - - // Prepare the source request - target := api.ContainerPostTarget{} - target.Operation = opAPI.ID - target.Websockets = targetSecrets - target.Certificate = info.Certificate - sourceReq.Target = &target - - return r.tryMigrateContainerSnapshot(source, cName, sName, sourceReq, info.Addresses) - } - - // Get source server connection information - info, err := source.GetConnectionInfo() - if err != nil { - return nil, err - } - - op, err := source.MigrateContainerSnapshot(cName, sName, sourceReq) - if err != nil { - return nil, err - } - - opAPI := op.Get() - - sourceSecrets := map[string]string{} - for k, v := range opAPI.Metadata { - sourceSecrets[k] = v.(string) - } - - // Relay mode migration - if args != nil && args.Mode == "relay" { - // Push copy source fields - req.Source.Type = "migration" - req.Source.Mode = "push" - - // Start the process - targetOp, err := r.CreateContainer(req) - if err != nil { - return nil, err - } - - targetOpAPI := targetOp.Get() - - // Extract the websockets - targetSecrets := map[string]string{} - for k, v := range targetOpAPI.Metadata { - targetSecrets[k] = v.(string) - } - - // Launch the relay - err = r.proxyMigration(targetOp.(*operation), targetSecrets, source, op.(*operation), sourceSecrets) - if err != nil { - return nil, err - } - - // 
Prepare a tracking operation - rop := remoteOperation{ - targetOp: targetOp, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - rop.err = rop.targetOp.Wait() - close(rop.chDone) - }() - - return &rop, nil - } - - // Pull mode migration - req.Source.Type = "migration" - req.Source.Mode = "pull" - req.Source.Operation = opAPI.ID - req.Source.Websockets = sourceSecrets - req.Source.Certificate = info.Certificate - - return r.tryCreateContainer(req, info.Addresses) -} - -// RenameContainerSnapshot requests that LXD renames the snapshot. -func (r *ProtocolLXD) RenameContainerSnapshot(containerName string, name string, container api.ContainerSnapshotPost) (Operation, error) { - // Quick check. - if container.Migration { - return nil, fmt.Errorf("Can't ask for a migration through RenameContainerSnapshot") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/containers/%s/snapshots/%s", url.PathEscape(containerName), url.PathEscape(name)), container, "") - if err != nil { - return nil, err - } - - return op, nil -} - -func (r *ProtocolLXD) tryMigrateContainerSnapshot(source InstanceServer, containerName string, name string, req api.ContainerSnapshotPost, urls []string) (RemoteOperation, error) { - if len(urls) == 0 { - return nil, fmt.Errorf("The target server isn't listening on the network") - } - - rop := remoteOperation{ - chDone: make(chan bool), - } - - operation := req.Target.Operation - - // Forward targetOp to remote op - go func() { - success := false - var errors []remoteOperationResult - for _, serverURL := range urls { - req.Target.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) - - op, err := source.MigrateContainerSnapshot(containerName, name, req) - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - continue - } - - rop.targetOp = op - - for _, handler := range rop.handlers { - _, _ = 
rop.targetOp.AddHandler(handler) - } - - err = rop.targetOp.Wait() - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - - if shared.IsConnectionError(err) { - continue - } - - break - } - - success = true - break - } - - if !success { - rop.err = remoteOperationError("Failed container migration", errors) - } - - close(rop.chDone) - }() - - return &rop, nil -} - -// MigrateContainerSnapshot requests that LXD prepares for a snapshot migration. -func (r *ProtocolLXD) MigrateContainerSnapshot(containerName string, name string, container api.ContainerSnapshotPost) (Operation, error) { - // Quick check. - if !container.Migration { - return nil, fmt.Errorf("Can't ask for a rename through MigrateContainerSnapshot") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/containers/%s/snapshots/%s", url.PathEscape(containerName), url.PathEscape(name)), container, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// DeleteContainerSnapshot requests that LXD deletes the container snapshot. -func (r *ProtocolLXD) DeleteContainerSnapshot(containerName string, name string) (Operation, error) { - // Send the request - op, _, err := r.queryOperation("DELETE", fmt.Sprintf("/containers/%s/snapshots/%s", url.PathEscape(containerName), url.PathEscape(name)), nil, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// UpdateContainerSnapshot requests that LXD updates the container snapshot. 
-func (r *ProtocolLXD) UpdateContainerSnapshot(containerName string, name string, container api.ContainerSnapshotPut, ETag string) (Operation, error) { - if !r.HasExtension("snapshot_expiry") { - return nil, fmt.Errorf("The server is missing the required \"snapshot_expiry\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("PUT", fmt.Sprintf("/containers/%s/snapshots/%s", - url.PathEscape(containerName), url.PathEscape(name)), container, ETag) - if err != nil { - return nil, err - } - - return op, nil -} - -// GetContainerState returns a ContainerState entry for the provided container name. -func (r *ProtocolLXD) GetContainerState(name string) (*api.ContainerState, string, error) { - state := api.ContainerState{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("/containers/%s/state", url.PathEscape(name)), nil, "", &state) - if err != nil { - return nil, "", err - } - - return &state, etag, nil -} - -// UpdateContainerState updates the container to match the requested state. -func (r *ProtocolLXD) UpdateContainerState(name string, state api.ContainerStatePut, ETag string) (Operation, error) { - // Send the request - op, _, err := r.queryOperation("PUT", fmt.Sprintf("/containers/%s/state", url.PathEscape(name)), state, ETag) - if err != nil { - return nil, err - } - - return op, nil -} - -// GetContainerLogfiles returns a list of logfiles for the container. -func (r *ProtocolLXD) GetContainerLogfiles(name string) ([]string, error) { - // Fetch the raw URL values. - urls := []string{} - baseURL := fmt.Sprintf("/containers/%s/logs", url.PathEscape(name)) - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetContainerLogfile returns the content of the requested logfile -// -// Note that it's the caller's responsibility to close the returned ReadCloser. 
-func (r *ProtocolLXD) GetContainerLogfile(name string, filename string) (io.ReadCloser, error) { - // Prepare the HTTP request - url := fmt.Sprintf("%s/1.0/containers/%s/logs/%s", r.httpBaseURL.String(), url.PathEscape(name), url.PathEscape(filename)) - - url, err := r.setQueryAttributes(url) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return nil, err - } - - // Check the return value for a cleaner error - if resp.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(resp) - if err != nil { - return nil, err - } - } - - return resp.Body, err -} - -// DeleteContainerLogfile deletes the requested logfile. -func (r *ProtocolLXD) DeleteContainerLogfile(name string, filename string) error { - // Send the request - _, _, err := r.query("DELETE", fmt.Sprintf("/containers/%s/logs/%s", url.PathEscape(name), url.PathEscape(filename)), nil, "") - if err != nil { - return err - } - - return nil -} - -// GetContainerMetadata returns container metadata. -func (r *ProtocolLXD) GetContainerMetadata(name string) (*api.ImageMetadata, string, error) { - if !r.HasExtension("container_edit_metadata") { - return nil, "", fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") - } - - metadata := api.ImageMetadata{} - - url := fmt.Sprintf("/containers/%s/metadata", url.PathEscape(name)) - etag, err := r.queryStruct("GET", url, nil, "", &metadata) - if err != nil { - return nil, "", err - } - - return &metadata, etag, err -} - -// SetContainerMetadata sets the content of the container metadata file. 
-func (r *ProtocolLXD) SetContainerMetadata(name string, metadata api.ImageMetadata, ETag string) error { - if !r.HasExtension("container_edit_metadata") { - return fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") - } - - url := fmt.Sprintf("/containers/%s/metadata", url.PathEscape(name)) - _, _, err := r.query("PUT", url, metadata, ETag) - if err != nil { - return err - } - - return nil -} - -// GetContainerTemplateFiles returns the list of names of template files for a container. -func (r *ProtocolLXD) GetContainerTemplateFiles(containerName string) ([]string, error) { - if !r.HasExtension("container_edit_metadata") { - return nil, fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") - } - - templates := []string{} - - url := fmt.Sprintf("/containers/%s/metadata/templates", url.PathEscape(containerName)) - _, err := r.queryStruct("GET", url, nil, "", &templates) - if err != nil { - return nil, err - } - - return templates, nil -} - -// GetContainerTemplateFile returns the content of a template file for a container. 
-func (r *ProtocolLXD) GetContainerTemplateFile(containerName string, templateName string) (io.ReadCloser, error) { - if !r.HasExtension("container_edit_metadata") { - return nil, fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") - } - - url := fmt.Sprintf("%s/1.0/containers/%s/metadata/templates?path=%s", r.httpBaseURL.String(), url.PathEscape(containerName), url.QueryEscape(templateName)) - - url, err := r.setQueryAttributes(url) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return nil, err - } - - // Check the return value for a cleaner error - if resp.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(resp) - if err != nil { - return nil, err - } - } - - return resp.Body, err -} - -// CreateContainerTemplateFile creates an a template for a container. -func (r *ProtocolLXD) CreateContainerTemplateFile(containerName string, templateName string, content io.ReadSeeker) error { - if !r.HasExtension("container_edit_metadata") { - return fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") - } - - url := fmt.Sprintf("%s/1.0/containers/%s/metadata/templates?path=%s", r.httpBaseURL.String(), url.PathEscape(containerName), url.QueryEscape(templateName)) - - url, err := r.setQueryAttributes(url) - if err != nil { - return err - } - - req, err := http.NewRequest("POST", url, content) - if err != nil { - return err - } - - req.Header.Set("Content-Type", "application/octet-stream") - - // Send the request - resp, err := r.DoHTTP(req) - // Check the return value for a cleaner error - if resp.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(resp) - if err != nil { - return err - } - } - return err -} - -// UpdateContainerTemplateFile updates the content for a container template file. 
-func (r *ProtocolLXD) UpdateContainerTemplateFile(containerName string, templateName string, content io.ReadSeeker) error { - return r.CreateContainerTemplateFile(containerName, templateName, content) -} - -// DeleteContainerTemplateFile deletes a template file for a container. -func (r *ProtocolLXD) DeleteContainerTemplateFile(name string, templateName string) error { - if !r.HasExtension("container_edit_metadata") { - return fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") - } - - _, _, err := r.query("DELETE", fmt.Sprintf("/containers/%s/metadata/templates?path=%s", url.PathEscape(name), url.QueryEscape(templateName)), nil, "") - return err -} - -// ConsoleContainer requests that LXD attaches to the console device of a container. -func (r *ProtocolLXD) ConsoleContainer(containerName string, console api.ContainerConsolePost, args *ContainerConsoleArgs) (Operation, error) { - if !r.HasExtension("console") { - return nil, fmt.Errorf("The server is missing the required \"console\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/containers/%s/console", url.PathEscape(containerName)), console, "") - if err != nil { - return nil, err - } - - opAPI := op.Get() - - if args == nil || args.Terminal == nil { - return nil, fmt.Errorf("A terminal must be set") - } - - if args.Control == nil { - return nil, fmt.Errorf("A control channel must be set") - } - - // Parse the fds - fds := map[string]string{} - - value, ok := opAPI.Metadata["fds"] - if ok { - values := value.(map[string]any) - for k, v := range values { - fds[k] = v.(string) - } - } - - var controlConn *websocket.Conn - // Call the control handler with a connection to the control socket - if fds[api.SecretNameControl] == "" { - return nil, fmt.Errorf("Did not receive a file descriptor for the control channel") - } - - controlConn, err = r.GetOperationWebsocket(opAPI.ID, fds[api.SecretNameControl]) - if err != nil { - return 
nil, err - } - - go args.Control(controlConn) - - // Connect to the websocket - conn, err := r.GetOperationWebsocket(opAPI.ID, fds["0"]) - if err != nil { - return nil, err - } - - // Detach from console. - go func(consoleDisconnect <-chan bool) { - <-consoleDisconnect - msg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "Detaching from console") - // We don't care if this fails. This is just for convenience. - _ = controlConn.WriteMessage(websocket.CloseMessage, msg) - _ = controlConn.Close() - }(args.ConsoleDisconnect) - - // And attach stdin and stdout to it - go func() { - shared.WebsocketSendStream(conn, args.Terminal, -1) - <-shared.WebsocketRecvStream(args.Terminal, conn) - _ = conn.Close() - }() - - return op, nil -} - -// GetContainerConsoleLog requests that LXD attaches to the console device of a container. -// -// Note that it's the caller's responsibility to close the returned ReadCloser. -func (r *ProtocolLXD) GetContainerConsoleLog(containerName string, args *ContainerConsoleLogArgs) (io.ReadCloser, error) { - if !r.HasExtension("console") { - return nil, fmt.Errorf("The server is missing the required \"console\" API extension") - } - - // Prepare the HTTP request - url := fmt.Sprintf("%s/1.0/containers/%s/console", r.httpBaseURL.String(), url.PathEscape(containerName)) - - url, err := r.setQueryAttributes(url) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return nil, err - } - - // Check the return value for a cleaner error - if resp.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(resp) - if err != nil { - return nil, err - } - } - - return resp.Body, err -} - -// DeleteContainerConsoleLog deletes the requested container's console log. 
-func (r *ProtocolLXD) DeleteContainerConsoleLog(containerName string, args *ContainerConsoleLogArgs) error { - if !r.HasExtension("console") { - return fmt.Errorf("The server is missing the required \"console\" API extension") - } - - // Send the request - _, _, err := r.query("DELETE", fmt.Sprintf("/containers/%s/console", url.PathEscape(containerName)), nil, "") - if err != nil { - return err - } - - return nil -} - -// GetContainerBackupNames returns a list of backup names for the container. -func (r *ProtocolLXD) GetContainerBackupNames(containerName string) ([]string, error) { - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - // Fetch the raw URL values. - urls := []string{} - baseURL := fmt.Sprintf("/containers/%s/backups", url.PathEscape(containerName)) - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetContainerBackups returns a list of backups for the container. -func (r *ProtocolLXD) GetContainerBackups(containerName string) ([]api.ContainerBackup, error) { - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - // Fetch the raw value - backups := []api.ContainerBackup{} - - _, err := r.queryStruct("GET", fmt.Sprintf("/containers/%s/backups?recursion=1", url.PathEscape(containerName)), nil, "", &backups) - if err != nil { - return nil, err - } - - return backups, nil -} - -// GetContainerBackup returns a Backup struct for the provided container and backup names. 
-func (r *ProtocolLXD) GetContainerBackup(containerName string, name string) (*api.ContainerBackup, string, error) { - if !r.HasExtension("container_backup") { - return nil, "", fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - // Fetch the raw value - backup := api.ContainerBackup{} - etag, err := r.queryStruct("GET", fmt.Sprintf("/containers/%s/backups/%s", url.PathEscape(containerName), url.PathEscape(name)), nil, "", &backup) - if err != nil { - return nil, "", err - } - - return &backup, etag, nil -} - -// CreateContainerBackup requests that LXD creates a new backup for the container. -func (r *ProtocolLXD) CreateContainerBackup(containerName string, backup api.ContainerBackupsPost) (Operation, error) { - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/containers/%s/backups", - url.PathEscape(containerName)), backup, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// RenameContainerBackup requests that LXD renames the backup. -func (r *ProtocolLXD) RenameContainerBackup(containerName string, name string, backup api.ContainerBackupPost) (Operation, error) { - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/containers/%s/backups/%s", - url.PathEscape(containerName), url.PathEscape(name)), backup, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// DeleteContainerBackup requests that LXD deletes the container backup. 
-func (r *ProtocolLXD) DeleteContainerBackup(containerName string, name string) (Operation, error) { - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("DELETE", fmt.Sprintf("/containers/%s/backups/%s", - url.PathEscape(containerName), url.PathEscape(name)), nil, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// GetContainerBackupFile requests the container backup content. -func (r *ProtocolLXD) GetContainerBackupFile(containerName string, name string, req *BackupFileRequest) (*BackupFileResponse, error) { - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - // Build the URL - uri := fmt.Sprintf("%s/1.0/containers/%s/backups/%s/export", r.httpBaseURL.String(), - url.PathEscape(containerName), url.PathEscape(name)) - if r.project != "" { - uri += fmt.Sprintf("?project=%s", url.QueryEscape(r.project)) - } - - // Prepare the download request - request, err := http.NewRequest("GET", uri, nil) - if err != nil { - return nil, err - } - - if r.httpUserAgent != "" { - request.Header.Set("User-Agent", r.httpUserAgent) - } - - // Start the request - response, doneCh, err := cancel.CancelableDownload(req.Canceler, r.http, request) - if err != nil { - return nil, err - } - - defer func() { _ = response.Body.Close() }() - defer close(doneCh) - - if response.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(response) - if err != nil { - return nil, err - } - } - - // Handle the data - body := response.Body - if req.ProgressHandler != nil { - body = &ioprogress.ProgressReader{ - ReadCloser: response.Body, - Tracker: &ioprogress.ProgressTracker{ - Length: response.ContentLength, - Handler: func(percent int64, speed int64) { - req.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("%d%% (%s/s)", 
percent, units.GetByteSizeString(speed, 2))}) - }, - }, - } - } - - size, err := io.Copy(req.BackupFile, body) - if err != nil { - return nil, err - } - - resp := BackupFileResponse{} - resp.Size = size - - return &resp, nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_events.go b/vendor/github.com/lxc/lxd/client/lxd_events.go deleted file mode 100644 index cecdca0f..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_events.go +++ /dev/null @@ -1,197 +0,0 @@ -package lxd - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/gorilla/websocket" - - "github.com/lxc/lxd/shared" - "github.com/lxc/lxd/shared/api" -) - -// Event handling functions - -// getEvents connects to the LXD monitoring interface. -func (r *ProtocolLXD) getEvents(allProjects bool) (*EventListener, error) { - // Prevent anything else from interacting with the listeners - r.eventListenersLock.Lock() - defer r.eventListenersLock.Unlock() - - ctx, cancel := context.WithCancel(context.Background()) - - // Setup a new listener - listener := EventListener{ - r: r, - ctx: ctx, - ctxCancel: cancel, - } - - connInfo, _ := r.GetConnectionInfo() - if connInfo.Project == "" { - return nil, fmt.Errorf("Unexpected empty project in connection info") - } - - if !allProjects { - listener.projectName = connInfo.Project - } - - // There is an existing Go routine for the required project filter, so just add another target. - if r.eventListeners[listener.projectName] != nil { - r.eventListeners[listener.projectName] = append(r.eventListeners[listener.projectName], &listener) - return &listener, nil - } - - // Setup a new connection with LXD - var url string - var err error - if allProjects { - url, err = r.setQueryAttributes("/events?all-projects=true") - } else { - url, err = r.setQueryAttributes("/events") - } - - if err != nil { - return nil, err - } - - // Connect websocket and save. 
- wsConn, err := r.websocket(url) - if err != nil { - return nil, err - } - - r.eventConnsLock.Lock() - r.eventConns[listener.projectName] = wsConn // Save for others to use. - r.eventConnsLock.Unlock() - - // Initialize the event listener list if we were able to connect to the events websocket. - r.eventListeners[listener.projectName] = []*EventListener{&listener} - - // Spawn a watcher that will close the websocket connection after all - // listeners are gone. - stopCh := make(chan struct{}) - go func() { - for { - select { - case <-time.After(time.Minute): - case <-r.ctxConnected.Done(): - case <-stopCh: - } - - r.eventListenersLock.Lock() - r.eventConnsLock.Lock() - if len(r.eventListeners[listener.projectName]) == 0 { - // We don't need the connection anymore, disconnect and clear. - if r.eventListeners[listener.projectName] != nil { - _ = r.eventConns[listener.projectName].Close() - delete(r.eventConns, listener.projectName) - } - - r.eventListeners[listener.projectName] = nil - r.eventListenersLock.Unlock() - r.eventConnsLock.Unlock() - - return - } - - r.eventListenersLock.Unlock() - r.eventConnsLock.Unlock() - } - }() - - // Spawn the listener - go func() { - for { - _, data, err := wsConn.ReadMessage() - if err != nil { - // Prevent anything else from interacting with the listeners - r.eventListenersLock.Lock() - defer r.eventListenersLock.Unlock() - - // Tell all the current listeners about the failure - for _, listener := range r.eventListeners[listener.projectName] { - listener.err = err - listener.ctxCancel() - } - - // And remove them all from the list so that when watcher routine runs it will - // close the websocket connection. - r.eventListeners[listener.projectName] = nil - - close(stopCh) // Instruct watcher go routine to cleanup. 
- - return - } - - // Attempt to unpack the message - event := api.Event{} - err = json.Unmarshal(data, &event) - if err != nil { - continue - } - - // Extract the message type - if event.Type == "" { - continue - } - - // Send the message to all handlers - r.eventListenersLock.Lock() - for _, listener := range r.eventListeners[listener.projectName] { - listener.targetsLock.Lock() - for _, target := range listener.targets { - if target.types != nil && !shared.StringInSlice(event.Type, target.types) { - continue - } - - go target.function(event) - } - - listener.targetsLock.Unlock() - } - - r.eventListenersLock.Unlock() - } - }() - - return &listener, nil -} - -// GetEvents gets the events for the project defined on the client. -func (r *ProtocolLXD) GetEvents() (*EventListener, error) { - return r.getEvents(false) -} - -// GetEventsAllProjects gets events for all projects. -func (r *ProtocolLXD) GetEventsAllProjects() (*EventListener, error) { - return r.getEvents(true) -} - -// SendEvent send an event to the server via the client's event listener connection. -func (r *ProtocolLXD) SendEvent(event api.Event) error { - r.eventConnsLock.Lock() - defer r.eventConnsLock.Unlock() - - // Find an available event listener connection. - // It doesn't matter which project the event listener connection is using, as this only affects which - // events are received from the server, not which events we can send to it. 
- var eventConn *websocket.Conn - for _, eventConn = range r.eventConns { - break - } - - if eventConn == nil { - return fmt.Errorf("No available event listener connection") - } - - deadline, ok := r.ctx.Deadline() - if !ok { - deadline = time.Now().Add(5 * time.Second) - } - - _ = eventConn.SetWriteDeadline(deadline) - return eventConn.WriteJSON(event) -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_images.go b/vendor/github.com/lxc/lxd/client/lxd_images.go deleted file mode 100644 index f9bfc65f..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_images.go +++ /dev/null @@ -1,1005 +0,0 @@ -package lxd - -import ( - "crypto/sha256" - "fmt" - "io" - "mime" - "mime/multipart" - "net/http" - "net/url" - "os" - "strings" - "time" - - "github.com/lxc/lxd/shared" - "github.com/lxc/lxd/shared/api" - "github.com/lxc/lxd/shared/cancel" - "github.com/lxc/lxd/shared/ioprogress" - "github.com/lxc/lxd/shared/units" -) - -// Image handling functions - -// GetImages returns a list of available images as Image structs. -func (r *ProtocolLXD) GetImages() ([]api.Image, error) { - images := []api.Image{} - - _, err := r.queryStruct("GET", "/images?recursion=1", nil, "", &images) - if err != nil { - return nil, err - } - - return images, nil -} - -// GetImagesWithFilter returns a filtered list of available images as Image structs. -func (r *ProtocolLXD) GetImagesWithFilter(filters []string) ([]api.Image, error) { - if !r.HasExtension("api_filtering") { - return nil, fmt.Errorf("The server is missing the required \"api_filtering\" API extension") - } - - images := []api.Image{} - - v := url.Values{} - v.Set("recursion", "1") - v.Set("filter", parseFilters(filters)) - - _, err := r.queryStruct("GET", fmt.Sprintf("/images?%s", v.Encode()), nil, "", &images) - if err != nil { - return nil, err - } - - return images, nil -} - -// GetImageFingerprints returns a list of available image fingerprints. 
-func (r *ProtocolLXD) GetImageFingerprints() ([]string, error) { - // Fetch the raw URL values. - urls := []string{} - baseURL := "/images" - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetImage returns an Image struct for the provided fingerprint. -func (r *ProtocolLXD) GetImage(fingerprint string) (*api.Image, string, error) { - return r.GetPrivateImage(fingerprint, "") -} - -// GetImageFile downloads an image from the server, returning an ImageFileRequest struct. -func (r *ProtocolLXD) GetImageFile(fingerprint string, req ImageFileRequest) (*ImageFileResponse, error) { - return r.GetPrivateImageFile(fingerprint, "", req) -} - -// GetImageSecret is a helper around CreateImageSecret that returns a secret for the image. -func (r *ProtocolLXD) GetImageSecret(fingerprint string) (string, error) { - op, err := r.CreateImageSecret(fingerprint) - if err != nil { - return "", err - } - - opAPI := op.Get() - - return opAPI.Metadata["secret"].(string), nil -} - -// GetPrivateImage is similar to GetImage but allows passing a secret download token. -func (r *ProtocolLXD) GetPrivateImage(fingerprint string, secret string) (*api.Image, string, error) { - image := api.Image{} - - // Build the API path - path := fmt.Sprintf("/images/%s", url.PathEscape(fingerprint)) - var err error - path, err = r.setQueryAttributes(path) - if err != nil { - return nil, "", err - } - - if secret != "" { - path, err = setQueryParam(path, "secret", secret) - if err != nil { - return nil, "", err - } - } - - // Fetch the raw value - etag, err := r.queryStruct("GET", path, nil, "", &image) - if err != nil { - return nil, "", err - } - - return &image, etag, nil -} - -// GetPrivateImageFile is similar to GetImageFile but allows passing a secret download token. 
-func (r *ProtocolLXD) GetPrivateImageFile(fingerprint string, secret string, req ImageFileRequest) (*ImageFileResponse, error) { - // Quick checks. - if req.MetaFile == nil && req.RootfsFile == nil { - return nil, fmt.Errorf("No file requested") - } - - uri := fmt.Sprintf("/1.0/images/%s/export", url.PathEscape(fingerprint)) - - var err error - uri, err = r.setQueryAttributes(uri) - if err != nil { - return nil, err - } - - // Attempt to download from host - if secret == "" && shared.PathExists("/dev/lxd/sock") && os.Geteuid() == 0 { - unixURI := fmt.Sprintf("http://unix.socket%s", uri) - - // Setup the HTTP client - devlxdHTTP, err := unixHTTPClient(nil, "/dev/lxd/sock") - if err == nil { - resp, err := lxdDownloadImage(fingerprint, unixURI, r.httpUserAgent, devlxdHTTP, req) - if err == nil { - return resp, nil - } - } - } - - // Build the URL - uri = fmt.Sprintf("%s%s", r.httpBaseURL.String(), uri) - if secret != "" { - uri, err = setQueryParam(uri, "secret", secret) - if err != nil { - return nil, err - } - } - - // Use relatively short response header timeout so as not to hold the image lock open too long. - // Deference client and transport in order to clone them so as to not modify timeout of base client. 
- httpClient := *r.http - httpTransport := httpClient.Transport.(*http.Transport).Clone() - httpTransport.ResponseHeaderTimeout = 30 * time.Second - httpClient.Transport = httpTransport - - return lxdDownloadImage(fingerprint, uri, r.httpUserAgent, &httpClient, req) -} - -func lxdDownloadImage(fingerprint string, uri string, userAgent string, client *http.Client, req ImageFileRequest) (*ImageFileResponse, error) { - // Prepare the response - resp := ImageFileResponse{} - - // Prepare the download request - request, err := http.NewRequest("GET", uri, nil) - if err != nil { - return nil, err - } - - if userAgent != "" { - request.Header.Set("User-Agent", userAgent) - } - - // Start the request - response, doneCh, err := cancel.CancelableDownload(req.Canceler, client, request) - if err != nil { - return nil, err - } - - defer func() { _ = response.Body.Close() }() - defer close(doneCh) - - if response.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(response) - if err != nil { - return nil, err - } - } - - ctype, ctypeParams, err := mime.ParseMediaType(response.Header.Get("Content-Type")) - if err != nil { - ctype = "application/octet-stream" - } - - // Handle the data - body := response.Body - if req.ProgressHandler != nil { - reader := &ioprogress.ProgressReader{ - ReadCloser: response.Body, - Tracker: &ioprogress.ProgressTracker{ - Length: response.ContentLength, - }, - } - - if response.ContentLength > 0 { - reader.Tracker.Handler = func(percent int64, speed int64) { - req.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("%d%% (%s/s)", percent, units.GetByteSizeString(speed, 2))}) - } - } else { - reader.Tracker.Handler = func(received int64, speed int64) { - req.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("%s (%s/s)", units.GetByteSizeString(received, 2), units.GetByteSizeString(speed, 2))}) - } - } - - body = reader - } - - // Hashing - sha256 := sha256.New() - - // Deal with split images - if ctype == "multipart/form-data" { 
- if req.MetaFile == nil || req.RootfsFile == nil { - return nil, fmt.Errorf("Multi-part image but only one target file provided") - } - - // Parse the POST data - mr := multipart.NewReader(body, ctypeParams["boundary"]) - - // Get the metadata tarball - part, err := mr.NextPart() - if err != nil { - return nil, err - } - - if part.FormName() != "metadata" { - return nil, fmt.Errorf("Invalid multipart image") - } - - size, err := io.Copy(io.MultiWriter(req.MetaFile, sha256), part) - if err != nil { - return nil, err - } - - resp.MetaSize = size - resp.MetaName = part.FileName() - - // Get the rootfs tarball - part, err = mr.NextPart() - if err != nil { - return nil, err - } - - if !shared.StringInSlice(part.FormName(), []string{"rootfs", "rootfs.img"}) { - return nil, fmt.Errorf("Invalid multipart image") - } - - size, err = io.Copy(io.MultiWriter(req.RootfsFile, sha256), part) - if err != nil { - return nil, err - } - - resp.RootfsSize = size - resp.RootfsName = part.FileName() - - // Check the hash - hash := fmt.Sprintf("%x", sha256.Sum(nil)) - if !strings.HasPrefix(hash, fingerprint) { - return nil, fmt.Errorf("Image fingerprint doesn't match. Got %s expected %s", hash, fingerprint) - } - - return &resp, nil - } - - // Deal with unified images - _, cdParams, err := mime.ParseMediaType(response.Header.Get("Content-Disposition")) - if err != nil { - return nil, err - } - - filename, ok := cdParams["filename"] - if !ok { - return nil, fmt.Errorf("No filename in Content-Disposition header") - } - - size, err := io.Copy(io.MultiWriter(req.MetaFile, sha256), body) - if err != nil { - return nil, err - } - - resp.MetaSize = size - resp.MetaName = filename - - // Check the hash - hash := fmt.Sprintf("%x", sha256.Sum(nil)) - if !strings.HasPrefix(hash, fingerprint) { - return nil, fmt.Errorf("Image fingerprint doesn't match. 
Got %s expected %s", hash, fingerprint) - } - - return &resp, nil -} - -// GetImageAliases returns the list of available aliases as ImageAliasesEntry structs. -func (r *ProtocolLXD) GetImageAliases() ([]api.ImageAliasesEntry, error) { - aliases := []api.ImageAliasesEntry{} - - // Fetch the raw value - _, err := r.queryStruct("GET", "/images/aliases?recursion=1", nil, "", &aliases) - if err != nil { - return nil, err - } - - return aliases, nil -} - -// GetImageAliasNames returns the list of available alias names. -func (r *ProtocolLXD) GetImageAliasNames() ([]string, error) { - // Fetch the raw URL values. - urls := []string{} - baseURL := "/images/aliases" - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetImageAlias returns an existing alias as an ImageAliasesEntry struct. -func (r *ProtocolLXD) GetImageAlias(name string) (*api.ImageAliasesEntry, string, error) { - alias := api.ImageAliasesEntry{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("/images/aliases/%s", url.PathEscape(name)), nil, "", &alias) - if err != nil { - return nil, "", err - } - - return &alias, etag, nil -} - -// GetImageAliasType returns an existing alias as an ImageAliasesEntry struct. -func (r *ProtocolLXD) GetImageAliasType(imageType string, name string) (*api.ImageAliasesEntry, string, error) { - alias, etag, err := r.GetImageAlias(name) - if err != nil { - return nil, "", err - } - - if imageType != "" { - if alias.Type == "" { - alias.Type = "container" - } - - if alias.Type != imageType { - return nil, "", fmt.Errorf("Alias doesn't exist for the specified type") - } - } - - return alias, etag, nil -} - -// GetImageAliasArchitectures returns a map of architectures / targets. 
-func (r *ProtocolLXD) GetImageAliasArchitectures(imageType string, name string) (map[string]*api.ImageAliasesEntry, error) { - alias, _, err := r.GetImageAliasType(imageType, name) - if err != nil { - return nil, err - } - - img, _, err := r.GetImage(alias.Target) - if err != nil { - return nil, err - } - - return map[string]*api.ImageAliasesEntry{img.Architecture: alias}, nil -} - -// CreateImage requests that LXD creates, copies or import a new image. -func (r *ProtocolLXD) CreateImage(image api.ImagesPost, args *ImageCreateArgs) (Operation, error) { - if image.CompressionAlgorithm != "" { - if !r.HasExtension("image_compression_algorithm") { - return nil, fmt.Errorf("The server is missing the required \"image_compression_algorithm\" API extension") - } - } - - // Send the JSON based request - if args == nil { - op, _, err := r.queryOperation("POST", "/images", image, "") - if err != nil { - return nil, err - } - - return op, nil - } - - // Prepare an image upload - if args.MetaFile == nil { - return nil, fmt.Errorf("Metadata file is required") - } - - // Prepare the body - var ioErr error - var body io.Reader - var contentType string - if args.RootfsFile == nil { - // If unified image, just pass it through - body = args.MetaFile - - contentType = "application/octet-stream" - } else { - pr, pw := io.Pipe() - // Setup the multipart writer - w := multipart.NewWriter(pw) - - go func() { - defer func() { - w.Close() - pw.Close() - }() - - // Metadata file - fw, ioErr := w.CreateFormFile("metadata", args.MetaName) - if ioErr != nil { - return - } - - _, ioErr = io.Copy(fw, args.MetaFile) - if ioErr != nil { - return - } - - // Rootfs file - if args.Type == "virtual-machine" { - fw, ioErr = w.CreateFormFile("rootfs.img", args.RootfsName) - } else { - fw, ioErr = w.CreateFormFile("rootfs", args.RootfsName) - } - - if ioErr != nil { - return - } - - _, ioErr = io.Copy(fw, args.RootfsFile) - if ioErr != nil { - return - } - - // Done writing to multipart - ioErr = 
w.Close() - if ioErr != nil { - return - } - - ioErr = pw.Close() - if ioErr != nil { - return - } - }() - - // Setup progress handler - if args.ProgressHandler != nil { - body = &ioprogress.ProgressReader{ - ReadCloser: pr, - Tracker: &ioprogress.ProgressTracker{ - Handler: func(received int64, speed int64) { - args.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("%s (%s/s)", units.GetByteSizeString(received, 2), units.GetByteSizeString(speed, 2))}) - }, - }, - } - } else { - body = pr - } - - contentType = w.FormDataContentType() - } - - // Prepare the HTTP request - reqURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0/images", r.httpBaseURL.String())) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", reqURL, body) - if err != nil { - return nil, err - } - - // Setup the headers - req.Header.Set("Content-Type", contentType) - if image.Public { - req.Header.Set("X-LXD-public", "true") - } - - if image.Filename != "" { - req.Header.Set("X-LXD-filename", image.Filename) - } - - if len(image.Properties) > 0 { - imgProps := url.Values{} - - for k, v := range image.Properties { - imgProps.Set(k, v) - } - - req.Header.Set("X-LXD-properties", imgProps.Encode()) - } - - if len(image.Profiles) > 0 { - imgProfiles := url.Values{} - - for _, v := range image.Profiles { - imgProfiles.Add("profile", v) - } - - req.Header.Set("X-LXD-profiles", imgProfiles.Encode()) - } - - // Set the user agent - if image.Source != nil && image.Source.Fingerprint != "" && image.Source.Secret != "" && image.Source.Mode == "push" { - // Set fingerprint - req.Header.Set("X-LXD-fingerprint", image.Source.Fingerprint) - - // Set secret - req.Header.Set("X-LXD-secret", image.Source.Secret) - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return nil, err - } - - defer func() { _ = resp.Body.Close() }() - - if ioErr != nil { - return nil, err - } - - // Handle errors - response, _, err := lxdParseResponse(resp) - if err != nil { - 
return nil, err - } - - // Get to the operation - respOperation, err := response.MetadataAsOperation() - if err != nil { - return nil, err - } - - // Setup an Operation wrapper - op := operation{ - Operation: *respOperation, - r: r, - chActive: make(chan bool), - } - - return &op, nil -} - -// tryCopyImage iterates through the source server URLs until one lets it download the image. -func (r *ProtocolLXD) tryCopyImage(req api.ImagesPost, urls []string) (RemoteOperation, error) { - if len(urls) == 0 { - return nil, fmt.Errorf("The source server isn't listening on the network") - } - - rop := remoteOperation{ - chDone: make(chan bool), - } - - // For older servers, apply the aliases after copy - if !r.HasExtension("image_create_aliases") && req.Aliases != nil { - rop.chPost = make(chan bool) - - go func() { - defer close(rop.chPost) - - // Wait for the main operation to finish - <-rop.chDone - if rop.err != nil { - return - } - - var errors []remoteOperationResult - - // Get the operation data - op, err := rop.GetTarget() - if err != nil { - errors = append(errors, remoteOperationResult{Error: err}) - rop.err = remoteOperationError("Failed to get operation data", errors) - return - } - - // Extract the fingerprint - fingerprint := op.Metadata["fingerprint"].(string) - - // Add the aliases - for _, entry := range req.Aliases { - alias := api.ImageAliasesPost{} - alias.Name = entry.Name - alias.Target = fingerprint - - err := r.CreateImageAlias(alias) - if err != nil { - errors = append(errors, remoteOperationResult{Error: err}) - rop.err = remoteOperationError("Failed to create image alias", errors) - return - } - } - }() - } - - // Forward targetOp to remote op - go func() { - success := false - var errors []remoteOperationResult - for _, serverURL := range urls { - req.Source.Server = serverURL - - op, err := r.CreateImage(req, nil) - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - continue - } - - 
rop.handlerLock.Lock() - rop.targetOp = op - rop.handlerLock.Unlock() - - for _, handler := range rop.handlers { - _, _ = rop.targetOp.AddHandler(handler) - } - - err = rop.targetOp.Wait() - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - - if shared.IsConnectionError(err) { - continue - } - - break - } - - success = true - break - } - - if !success { - rop.err = remoteOperationError("Failed remote image download", errors) - } - - close(rop.chDone) - }() - - return &rop, nil -} - -// CopyImage copies an image from a remote server. Additional options can be passed using ImageCopyArgs. -func (r *ProtocolLXD) CopyImage(source ImageServer, image api.Image, args *ImageCopyArgs) (RemoteOperation, error) { - // Quick checks. - if r.isSameServer(source) { - return nil, fmt.Errorf("The source and target servers must be different") - } - - // Handle profile list overrides. - if args != nil && args.Profiles != nil { - if !r.HasExtension("image_copy_profile") { - return nil, fmt.Errorf("The server is missing the required \"image_copy_profile\" API extension") - } - - image.Profiles = args.Profiles - } else { - // If profiles aren't provided, clear the list on the source to - // avoid requiring the destination to have them all. - image.Profiles = nil - } - - // Get source server connection information - info, err := source.GetConnectionInfo() - if err != nil { - return nil, err - } - - // Push mode - if args != nil && args.Mode == "push" { - // Get certificate and URL - info, err := r.GetConnectionInfo() - if err != nil { - return nil, err - } - - imagesPost := api.ImagesPost{ - Source: &api.ImagesPostSource{ - Fingerprint: image.Fingerprint, - Mode: args.Mode, - }, - } - - if args.CopyAliases { - imagesPost.Aliases = image.Aliases - } - - imagesPost.ExpiresAt = image.ExpiresAt - imagesPost.Properties = image.Properties - imagesPost.Public = args.Public - - // Receive token from target server. 
This token is later passed to the source which will use - // it, together with the URL and certificate, to connect to the target. - tokenOp, err := r.CreateImage(imagesPost, nil) - if err != nil { - return nil, err - } - - opAPI := tokenOp.Get() - - secret, ok := opAPI.Metadata["secret"] - if !ok { - return nil, fmt.Errorf("No token provided") - } - - req := api.ImageExportPost{ - Target: info.URL, - Certificate: info.Certificate, - Secret: secret.(string), - Aliases: image.Aliases, - Project: info.Project, - Profiles: image.Profiles, - } - - exportOp, err := source.ExportImage(image.Fingerprint, req) - if err != nil { - _ = tokenOp.Cancel() - return nil, err - } - - rop := remoteOperation{ - targetOp: exportOp, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - rop.err = rop.targetOp.Wait() - _ = tokenOp.Cancel() - close(rop.chDone) - }() - - return &rop, nil - } - - // Relay mode - if args != nil && args.Mode == "relay" { - metaFile, err := os.CreateTemp("", "lxc_image_") - if err != nil { - return nil, err - } - - defer func() { _ = os.Remove(metaFile.Name()) }() - - rootfsFile, err := os.CreateTemp("", "lxc_image_") - if err != nil { - return nil, err - } - - defer func() { _ = os.Remove(rootfsFile.Name()) }() - - // Import image - req := ImageFileRequest{ - MetaFile: metaFile, - RootfsFile: rootfsFile, - } - - resp, err := source.GetImageFile(image.Fingerprint, req) - if err != nil { - return nil, err - } - - // Export image - _, err = metaFile.Seek(0, 0) - if err != nil { - return nil, err - } - - _, err = rootfsFile.Seek(0, 0) - if err != nil { - return nil, err - } - - imagePost := api.ImagesPost{} - imagePost.Public = args.Public - imagePost.Profiles = image.Profiles - - if args.CopyAliases { - imagePost.Aliases = image.Aliases - if args.Aliases != nil { - imagePost.Aliases = append(imagePost.Aliases, args.Aliases...) 
- } - } - - createArgs := &ImageCreateArgs{ - MetaFile: metaFile, - MetaName: image.Filename, - Type: image.Type, - } - - if resp.RootfsName != "" { - // Deal with split images - createArgs.RootfsFile = rootfsFile - createArgs.RootfsName = image.Filename - } - - rop := remoteOperation{ - chDone: make(chan bool), - } - - go func() { - defer close(rop.chDone) - - op, err := r.CreateImage(imagePost, createArgs) - if err != nil { - rop.err = remoteOperationError("Failed to copy image", nil) - return - } - - rop.handlerLock.Lock() - rop.targetOp = op - rop.handlerLock.Unlock() - - for _, handler := range rop.handlers { - _, _ = rop.targetOp.AddHandler(handler) - } - - err = rop.targetOp.Wait() - if err != nil { - rop.err = remoteOperationError("Failed to copy image", nil) - return - } - }() - - return &rop, nil - } - - // Prepare the copy request - req := api.ImagesPost{ - Source: &api.ImagesPostSource{ - ImageSource: api.ImageSource{ - Certificate: info.Certificate, - Protocol: info.Protocol, - }, - Fingerprint: image.Fingerprint, - Mode: "pull", - Type: "image", - Project: info.Project, - }, - ImagePut: api.ImagePut{ - Profiles: image.Profiles, - }, - } - - if args != nil { - req.Source.ImageType = args.Type - } - - // Generate secret token if needed - if !image.Public { - secret, err := source.GetImageSecret(image.Fingerprint) - if err != nil { - return nil, err - } - - req.Source.Secret = secret - } - - // Process the arguments - if args != nil { - req.Aliases = args.Aliases - req.AutoUpdate = args.AutoUpdate - req.Public = args.Public - - if args.CopyAliases { - req.Aliases = image.Aliases - if args.Aliases != nil { - req.Aliases = append(req.Aliases, args.Aliases...) - } - } - } - - return r.tryCopyImage(req, info.Addresses) -} - -// UpdateImage updates the image definition. 
-func (r *ProtocolLXD) UpdateImage(fingerprint string, image api.ImagePut, ETag string) error { - // Send the request - _, _, err := r.query("PUT", fmt.Sprintf("/images/%s", url.PathEscape(fingerprint)), image, ETag) - if err != nil { - return err - } - - return nil -} - -// DeleteImage requests that LXD removes an image from the store. -func (r *ProtocolLXD) DeleteImage(fingerprint string) (Operation, error) { - // Send the request - op, _, err := r.queryOperation("DELETE", fmt.Sprintf("/images/%s", url.PathEscape(fingerprint)), nil, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// RefreshImage requests that LXD issues an image refresh. -func (r *ProtocolLXD) RefreshImage(fingerprint string) (Operation, error) { - if !r.HasExtension("image_force_refresh") { - return nil, fmt.Errorf("The server is missing the required \"image_force_refresh\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/images/%s/refresh", url.PathEscape(fingerprint)), nil, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// CreateImageSecret requests that LXD issues a temporary image secret. -func (r *ProtocolLXD) CreateImageSecret(fingerprint string) (Operation, error) { - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/images/%s/secret", url.PathEscape(fingerprint)), nil, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// CreateImageAlias sets up a new image alias. -func (r *ProtocolLXD) CreateImageAlias(alias api.ImageAliasesPost) error { - // Send the request - _, _, err := r.query("POST", "/images/aliases", alias, "") - if err != nil { - return err - } - - return nil -} - -// UpdateImageAlias updates the image alias definition. 
-func (r *ProtocolLXD) UpdateImageAlias(name string, alias api.ImageAliasesEntryPut, ETag string) error { - // Send the request - _, _, err := r.query("PUT", fmt.Sprintf("/images/aliases/%s", url.PathEscape(name)), alias, ETag) - if err != nil { - return err - } - - return nil -} - -// RenameImageAlias renames an existing image alias. -func (r *ProtocolLXD) RenameImageAlias(name string, alias api.ImageAliasesEntryPost) error { - // Send the request - _, _, err := r.query("POST", fmt.Sprintf("/images/aliases/%s", url.PathEscape(name)), alias, "") - if err != nil { - return err - } - - return nil -} - -// DeleteImageAlias removes an alias from the LXD image store. -func (r *ProtocolLXD) DeleteImageAlias(name string) error { - // Send the request - _, _, err := r.query("DELETE", fmt.Sprintf("/images/aliases/%s", url.PathEscape(name)), nil, "") - if err != nil { - return err - } - - return nil -} - -// ExportImage exports (copies) an image to a remote server. -func (r *ProtocolLXD) ExportImage(fingerprint string, image api.ImageExportPost) (Operation, error) { - if !r.HasExtension("images_push_relay") { - return nil, fmt.Errorf("The server is missing the required \"images_push_relay\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/images/%s/export", url.PathEscape(fingerprint)), &image, "") - if err != nil { - return nil, err - } - - return op, nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_instances.go b/vendor/github.com/lxc/lxd/client/lxd_instances.go deleted file mode 100644 index 9fc972a1..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_instances.go +++ /dev/null @@ -1,2677 +0,0 @@ -package lxd - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "net/url" - "strings" - - "github.com/gorilla/websocket" - "github.com/pkg/sftp" - - "github.com/lxc/lxd/shared" - "github.com/lxc/lxd/shared/api" - "github.com/lxc/lxd/shared/cancel" - 
"github.com/lxc/lxd/shared/ioprogress" - "github.com/lxc/lxd/shared/tcp" - "github.com/lxc/lxd/shared/units" -) - -// Instance handling functions. - -// instanceTypeToPath converts the instance type to a URL path prefix and query string values. -// If the remote server doesn't have the instances extension then the /containers endpoint is used -// as long as the requested instanceType is any or container. -func (r *ProtocolLXD) instanceTypeToPath(instanceType api.InstanceType) (string, url.Values, error) { - v := url.Values{} - - // If the remote server doesn't support instances extension, check that only containers - // or any type has been requested and then fallback to using the old /containers endpoint. - if !r.HasExtension("instances") { - if instanceType == api.InstanceTypeContainer || instanceType == api.InstanceTypeAny { - return "/containers", v, nil - } - - return "", v, fmt.Errorf("Requested instance type not supported by server") - } - - // If a specific instance type has been requested, add the instance-type filter parameter - // to the returned URL values so that it can be used in the final URL if needed to filter - // the result set being returned. - if instanceType != api.InstanceTypeAny { - v.Set("instance-type", string(instanceType)) - } - - return "/instances", v, nil -} - -// GetInstanceNames returns a list of instance names. -func (r *ProtocolLXD) GetInstanceNames(instanceType api.InstanceType) ([]string, error) { - baseURL, v, err := r.instanceTypeToPath(instanceType) - if err != nil { - return nil, err - } - - // Fetch the raw URL values. - urls := []string{} - _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", baseURL, v.Encode()), nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetInstanceNamesAllProjects returns a list of instance names from all projects. 
-func (r *ProtocolLXD) GetInstanceNamesAllProjects(instanceType api.InstanceType) (map[string][]string, error) { - instances := []api.Instance{} - - path, v, err := r.instanceTypeToPath(instanceType) - if err != nil { - return nil, err - } - - v.Set("recursion", "1") - v.Set("all-projects", "true") - - // Fetch the raw URL values. - _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) - if err != nil { - return nil, err - } - - names := map[string][]string{} - for _, instance := range instances { - names[instance.Project] = append(names[instance.Project], instance.Name) - } - - return names, nil -} - -// GetInstances returns a list of instances. -func (r *ProtocolLXD) GetInstances(instanceType api.InstanceType) ([]api.Instance, error) { - instances := []api.Instance{} - - path, v, err := r.instanceTypeToPath(instanceType) - if err != nil { - return nil, err - } - - v.Set("recursion", "1") - - // Fetch the raw value - _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) - if err != nil { - return nil, err - } - - return instances, nil -} - -// GetInstancesWithFilter returns a filtered list of instances. -func (r *ProtocolLXD) GetInstancesWithFilter(instanceType api.InstanceType, filters []string) ([]api.Instance, error) { - if !r.HasExtension("api_filtering") { - return nil, fmt.Errorf("The server is missing the required \"api_filtering\" API extension") - } - - instances := []api.Instance{} - - path, v, err := r.instanceTypeToPath(instanceType) - if err != nil { - return nil, err - } - - v.Set("recursion", "1") - v.Set("filter", parseFilters(filters)) - - // Fetch the raw value - _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) - if err != nil { - return nil, err - } - - return instances, nil -} - -// GetInstancesAllProjects returns a list of instances from all projects. 
-func (r *ProtocolLXD) GetInstancesAllProjects(instanceType api.InstanceType) ([]api.Instance, error) { - instances := []api.Instance{} - - path, v, err := r.instanceTypeToPath(instanceType) - if err != nil { - return nil, err - } - - v.Set("recursion", "1") - v.Set("all-projects", "true") - - if !r.HasExtension("instance_all_projects") { - return nil, fmt.Errorf("The server is missing the required \"instance_all_projects\" API extension") - } - - // Fetch the raw value - _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) - if err != nil { - return nil, err - } - - return instances, nil -} - -// GetInstancesAllProjectsWithFilter returns a filtered list of instances from all projects. -func (r *ProtocolLXD) GetInstancesAllProjectsWithFilter(instanceType api.InstanceType, filters []string) ([]api.Instance, error) { - if !r.HasExtension("api_filtering") { - return nil, fmt.Errorf("The server is missing the required \"api_filtering\" API extension") - } - - instances := []api.Instance{} - - path, v, err := r.instanceTypeToPath(instanceType) - if err != nil { - return nil, err - } - - v.Set("recursion", "1") - v.Set("all-projects", "true") - v.Set("filter", parseFilters(filters)) - - if !r.HasExtension("instance_all_projects") { - return nil, fmt.Errorf("The server is missing the required \"instance_all_projects\" API extension") - } - - // Fetch the raw value - _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) - if err != nil { - return nil, err - } - - return instances, nil -} - -// UpdateInstances updates all instances to match the requested state. 
-func (r *ProtocolLXD) UpdateInstances(state api.InstancesPut, ETag string) (Operation, error) { - path, v, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - // Send the request - op, _, err := r.queryOperation("PUT", fmt.Sprintf("%s?%s", path, v.Encode()), state, ETag) - if err != nil { - return nil, err - } - - return op, nil -} - -// GetInstancesFull returns a list of instances including snapshots, backups and state. -func (r *ProtocolLXD) GetInstancesFull(instanceType api.InstanceType) ([]api.InstanceFull, error) { - instances := []api.InstanceFull{} - - path, v, err := r.instanceTypeToPath(instanceType) - if err != nil { - return nil, err - } - - v.Set("recursion", "2") - - if !r.HasExtension("container_full") { - return nil, fmt.Errorf("The server is missing the required \"container_full\" API extension") - } - - // Fetch the raw value - _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) - if err != nil { - return nil, err - } - - return instances, nil -} - -// GetInstancesFullWithFilter returns a filtered list of instances including snapshots, backups and state. 
-func (r *ProtocolLXD) GetInstancesFullWithFilter(instanceType api.InstanceType, filters []string) ([]api.InstanceFull, error) { - if !r.HasExtension("api_filtering") { - return nil, fmt.Errorf("The server is missing the required \"api_filtering\" API extension") - } - - instances := []api.InstanceFull{} - - path, v, err := r.instanceTypeToPath(instanceType) - if err != nil { - return nil, err - } - - v.Set("recursion", "2") - v.Set("filter", parseFilters(filters)) - - if !r.HasExtension("container_full") { - return nil, fmt.Errorf("The server is missing the required \"container_full\" API extension") - } - - // Fetch the raw value - _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) - if err != nil { - return nil, err - } - - return instances, nil -} - -// GetInstancesFullAllProjects returns a list of instances including snapshots, backups and state from all projects. -func (r *ProtocolLXD) GetInstancesFullAllProjects(instanceType api.InstanceType) ([]api.InstanceFull, error) { - instances := []api.InstanceFull{} - - path, v, err := r.instanceTypeToPath(instanceType) - if err != nil { - return nil, err - } - - v.Set("recursion", "2") - v.Set("all-projects", "true") - - if !r.HasExtension("container_full") { - return nil, fmt.Errorf("The server is missing the required \"container_full\" API extension") - } - - if !r.HasExtension("instance_all_projects") { - return nil, fmt.Errorf("The server is missing the required \"instance_all_projects\" API extension") - } - - // Fetch the raw value - _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) - if err != nil { - return nil, err - } - - return instances, nil -} - -// GetInstancesFullAllProjectsWithFilter returns a filtered list of instances including snapshots, backups and state from all projects. 
-func (r *ProtocolLXD) GetInstancesFullAllProjectsWithFilter(instanceType api.InstanceType, filters []string) ([]api.InstanceFull, error) { - if !r.HasExtension("api_filtering") { - return nil, fmt.Errorf("The server is missing the required \"api_filtering\" API extension") - } - - instances := []api.InstanceFull{} - - path, v, err := r.instanceTypeToPath(instanceType) - if err != nil { - return nil, err - } - - v.Set("recursion", "2") - v.Set("all-projects", "true") - v.Set("filter", parseFilters(filters)) - - if !r.HasExtension("container_full") { - return nil, fmt.Errorf("The server is missing the required \"container_full\" API extension") - } - - if !r.HasExtension("instance_all_projects") { - return nil, fmt.Errorf("The server is missing the required \"instance_all_projects\" API extension") - } - - // Fetch the raw value - _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) - if err != nil { - return nil, err - } - - return instances, nil -} - -// GetInstance returns the instance entry for the provided name. -func (r *ProtocolLXD) GetInstance(name string) (*api.Instance, string, error) { - instance := api.Instance{} - - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, "", err - } - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("%s/%s", path, url.PathEscape(name)), nil, "", &instance) - if err != nil { - return nil, "", err - } - - return &instance, etag, nil -} - -// GetInstanceFull returns the instance entry for the provided name along with snapshot information. -func (r *ProtocolLXD) GetInstanceFull(name string) (*api.InstanceFull, string, error) { - instance := api.InstanceFull{} - - if !r.HasExtension("instance_get_full") { - // Backware compatibility. 
- ct, _, err := r.GetInstance(name) - if err != nil { - return nil, "", err - } - - cs, _, err := r.GetInstanceState(name) - if err != nil { - return nil, "", err - } - - snaps, err := r.GetInstanceSnapshots(name) - if err != nil { - return nil, "", err - } - - backups, err := r.GetInstanceBackups(name) - if err != nil { - return nil, "", err - } - - instance.Instance = *ct - instance.State = cs - instance.Snapshots = snaps - instance.Backups = backups - - return &instance, "", nil - } - - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, "", err - } - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("%s/%s?recursion=1", path, url.PathEscape(name)), nil, "", &instance) - if err != nil { - return nil, "", err - } - - return &instance, etag, nil -} - -// CreateInstanceFromBackup is a convenience function to make it easier to -// create a instance from a backup. -func (r *ProtocolLXD) CreateInstanceFromBackup(args InstanceBackupArgs) (Operation, error) { - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - if args.PoolName == "" && args.Name == "" { - // Send the request - op, _, err := r.queryOperation("POST", path, args.BackupFile, "") - if err != nil { - return nil, err - } - - return op, nil - } - - if args.PoolName != "" && !r.HasExtension("container_backup_override_pool") { - return nil, fmt.Errorf(`The server is missing the required "container_backup_override_pool" API extension`) - } - - if args.Name != "" && !r.HasExtension("backup_override_name") { - return nil, fmt.Errorf(`The server is missing the required "backup_override_name" API extension`) - } - - // Prepare the HTTP request - reqURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0%s", r.httpBaseURL.String(), path)) - if err != nil { - return 
nil, err - } - - req, err := http.NewRequest("POST", reqURL, args.BackupFile) - if err != nil { - return nil, err - } - - req.Header.Set("Content-Type", "application/octet-stream") - - if args.PoolName != "" { - req.Header.Set("X-LXD-pool", args.PoolName) - } - - if args.Name != "" { - req.Header.Set("X-LXD-name", args.Name) - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return nil, err - } - - defer func() { _ = resp.Body.Close() }() - - // Handle errors - response, _, err := lxdParseResponse(resp) - if err != nil { - return nil, err - } - - // Get to the operation - respOperation, err := response.MetadataAsOperation() - if err != nil { - return nil, err - } - - // Setup an Operation wrapper - op := operation{ - Operation: *respOperation, - r: r, - chActive: make(chan bool), - } - - return &op, nil -} - -// CreateInstance requests that LXD creates a new instance. -func (r *ProtocolLXD) CreateInstance(instance api.InstancesPost) (Operation, error) { - path, _, err := r.instanceTypeToPath(instance.Type) - if err != nil { - return nil, err - } - - if instance.Source.InstanceOnly || instance.Source.ContainerOnly { - if !r.HasExtension("container_only_migration") { - return nil, fmt.Errorf("The server is missing the required \"container_only_migration\" API extension") - } - } - - // Send the request - op, _, err := r.queryOperation("POST", path, instance, "") - if err != nil { - return nil, err - } - - return op, nil -} - -func (r *ProtocolLXD) tryCreateInstance(req api.InstancesPost, urls []string, op Operation) (RemoteOperation, error) { - if len(urls) == 0 { - return nil, fmt.Errorf("The source server isn't listening on the network") - } - - rop := remoteOperation{ - chDone: make(chan bool), - } - - operation := req.Source.Operation - - // Forward targetOp to remote op - go func() { - success := false - var errors []remoteOperationResult - for _, serverURL := range urls { - if operation == "" { - req.Source.Server = serverURL - } else 
{ - req.Source.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) - } - - op, err := r.CreateInstance(req) - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - continue - } - - rop.handlerLock.Lock() - rop.targetOp = op - rop.handlerLock.Unlock() - - for _, handler := range rop.handlers { - _, _ = rop.targetOp.AddHandler(handler) - } - - err = rop.targetOp.Wait() - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - - if shared.IsConnectionError(err) { - continue - } - - break - } - - success = true - break - } - - if !success { - rop.err = remoteOperationError("Failed instance creation", errors) - if op != nil { - _ = op.Cancel() - } - } - - close(rop.chDone) - }() - - return &rop, nil -} - -// CreateInstanceFromImage is a convenience function to make it easier to create a instance from an existing image. -func (r *ProtocolLXD) CreateInstanceFromImage(source ImageServer, image api.Image, req api.InstancesPost) (RemoteOperation, error) { - // Set the minimal source fields - req.Source.Type = "image" - - // Optimization for the local image case - if r.isSameServer(source) { - // Always use fingerprints for local case - req.Source.Fingerprint = image.Fingerprint - req.Source.Alias = "" - - op, err := r.CreateInstance(req) - if err != nil { - return nil, err - } - - rop := remoteOperation{ - targetOp: op, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - rop.err = rop.targetOp.Wait() - close(rop.chDone) - }() - - return &rop, nil - } - - // Minimal source fields for remote image - req.Source.Mode = "pull" - - // If we have an alias and the image is public, use that - if req.Source.Alias != "" && image.Public { - req.Source.Fingerprint = "" - } else { - req.Source.Fingerprint = image.Fingerprint - req.Source.Alias = "" - } - - // Get source server connection information - info, err := source.GetConnectionInfo() - if 
err != nil { - return nil, err - } - - req.Source.Protocol = info.Protocol - req.Source.Certificate = info.Certificate - - // Generate secret token if needed - if !image.Public { - secret, err := source.GetImageSecret(image.Fingerprint) - if err != nil { - return nil, err - } - - req.Source.Secret = secret - } - - return r.tryCreateInstance(req, info.Addresses, nil) -} - -// CopyInstance copies a instance from a remote server. Additional options can be passed using InstanceCopyArgs. -func (r *ProtocolLXD) CopyInstance(source InstanceServer, instance api.Instance, args *InstanceCopyArgs) (RemoteOperation, error) { - // Base request - req := api.InstancesPost{ - Name: instance.Name, - InstancePut: instance.Writable(), - Type: api.InstanceType(instance.Type), - } - - req.Source.BaseImage = instance.Config["volatile.base_image"] - - // Process the copy arguments - if args != nil { - // Quick checks. - if args.InstanceOnly { - if !r.HasExtension("container_only_migration") { - return nil, fmt.Errorf("The target server is missing the required \"container_only_migration\" API extension") - } - - if !source.HasExtension("container_only_migration") { - return nil, fmt.Errorf("The source server is missing the required \"container_only_migration\" API extension") - } - } - - if shared.StringInSlice(args.Mode, []string{"push", "relay"}) { - if !r.HasExtension("container_push") { - return nil, fmt.Errorf("The target server is missing the required \"container_push\" API extension") - } - - if !source.HasExtension("container_push") { - return nil, fmt.Errorf("The source server is missing the required \"container_push\" API extension") - } - } - - if args.Mode == "push" && !source.HasExtension("container_push_target") { - return nil, fmt.Errorf("The source server is missing the required \"container_push_target\" API extension") - } - - if args.Refresh { - if !r.HasExtension("container_incremental_copy") { - return nil, fmt.Errorf("The target server is missing the required 
\"container_incremental_copy\" API extension") - } - - if !source.HasExtension("container_incremental_copy") { - return nil, fmt.Errorf("The source server is missing the required \"container_incremental_copy\" API extension") - } - } - - if args.AllowInconsistent { - if !r.HasExtension("instance_allow_inconsistent_copy") { - return nil, fmt.Errorf("The source server is missing the required \"instance_allow_inconsistent_copy\" API extension") - } - } - - // Allow overriding the target name - if args.Name != "" { - req.Name = args.Name - } - - req.Source.Live = args.Live - req.Source.InstanceOnly = args.InstanceOnly - req.Source.ContainerOnly = args.InstanceOnly // For legacy servers. - req.Source.Refresh = args.Refresh - req.Source.AllowInconsistent = args.AllowInconsistent - } - - if req.Source.Live { - req.Source.Live = instance.StatusCode == api.Running - } - - sourceInfo, err := source.GetConnectionInfo() - if err != nil { - return nil, fmt.Errorf("Failed to get source connection info: %w", err) - } - - destInfo, err := r.GetConnectionInfo() - if err != nil { - return nil, fmt.Errorf("Failed to get destination connection info: %w", err) - } - - // Optimization for the local copy case - if destInfo.URL == sourceInfo.URL && destInfo.SocketPath == sourceInfo.SocketPath && (!r.IsClustered() || instance.Location == r.clusterTarget || r.HasExtension("cluster_internal_copy")) { - // Project handling - if destInfo.Project != sourceInfo.Project { - if !r.HasExtension("container_copy_project") { - return nil, fmt.Errorf("The server is missing the required \"container_copy_project\" API extension") - } - - req.Source.Project = sourceInfo.Project - } - - // Local copy source fields - req.Source.Type = "copy" - req.Source.Source = instance.Name - - // Copy the instance - op, err := r.CreateInstance(req) - if err != nil { - return nil, err - } - - rop := remoteOperation{ - targetOp: op, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - 
rop.err = rop.targetOp.Wait() - close(rop.chDone) - }() - - return &rop, nil - } - - // Source request - sourceReq := api.InstancePost{ - Migration: true, - Live: req.Source.Live, - ContainerOnly: req.Source.ContainerOnly, // Deprecated, use InstanceOnly. - InstanceOnly: req.Source.InstanceOnly, - AllowInconsistent: req.Source.AllowInconsistent, - } - - // Push mode migration - if args != nil && args.Mode == "push" { - // Get target server connection information - info, err := r.GetConnectionInfo() - if err != nil { - return nil, err - } - - // Create the instance - req.Source.Type = "migration" - req.Source.Mode = "push" - req.Source.Refresh = args.Refresh - - op, err := r.CreateInstance(req) - if err != nil { - return nil, err - } - - opAPI := op.Get() - - targetSecrets := map[string]string{} - for k, v := range opAPI.Metadata { - targetSecrets[k] = v.(string) - } - - // Prepare the source request - target := api.InstancePostTarget{} - target.Operation = opAPI.ID - target.Websockets = targetSecrets - target.Certificate = info.Certificate - sourceReq.Target = &target - - return r.tryMigrateInstance(source, instance.Name, sourceReq, info.Addresses) - } - - // Get source server connection information - info, err := source.GetConnectionInfo() - if err != nil { - return nil, err - } - - op, err := source.MigrateInstance(instance.Name, sourceReq) - if err != nil { - return nil, err - } - - opAPI := op.Get() - - sourceSecrets := map[string]string{} - for k, v := range opAPI.Metadata { - sourceSecrets[k] = v.(string) - } - - // Relay mode migration - if args != nil && args.Mode == "relay" { - // Push copy source fields - req.Source.Type = "migration" - req.Source.Mode = "push" - - // Start the process - targetOp, err := r.CreateInstance(req) - if err != nil { - return nil, err - } - - targetOpAPI := targetOp.Get() - - // Extract the websockets - targetSecrets := map[string]string{} - for k, v := range targetOpAPI.Metadata { - targetSecrets[k] = v.(string) - } - - // 
Launch the relay - err = r.proxyMigration(targetOp.(*operation), targetSecrets, source, op.(*operation), sourceSecrets) - if err != nil { - return nil, err - } - - // Prepare a tracking operation - rop := remoteOperation{ - targetOp: targetOp, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - rop.err = rop.targetOp.Wait() - close(rop.chDone) - }() - - return &rop, nil - } - - // Pull mode migration - req.Source.Type = "migration" - req.Source.Mode = "pull" - req.Source.Operation = opAPI.ID - req.Source.Websockets = sourceSecrets - req.Source.Certificate = info.Certificate - - return r.tryCreateInstance(req, info.Addresses, op) -} - -// UpdateInstance updates the instance definition. -func (r *ProtocolLXD) UpdateInstance(name string, instance api.InstancePut, ETag string) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - // Send the request - op, _, err := r.queryOperation("PUT", fmt.Sprintf("%s/%s", path, url.PathEscape(name)), instance, ETag) - if err != nil { - return nil, err - } - - return op, nil -} - -// RenameInstance requests that LXD renames the instance. -func (r *ProtocolLXD) RenameInstance(name string, instance api.InstancePost) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - // Quick check. 
- if instance.Migration { - return nil, fmt.Errorf("Can't ask for a migration through RenameInstance") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s", path, url.PathEscape(name)), instance, "") - if err != nil { - return nil, err - } - - return op, nil -} - -func (r *ProtocolLXD) tryMigrateInstance(source InstanceServer, name string, req api.InstancePost, urls []string) (RemoteOperation, error) { - if len(urls) == 0 { - return nil, fmt.Errorf("The target server isn't listening on the network") - } - - rop := remoteOperation{ - chDone: make(chan bool), - } - - operation := req.Target.Operation - - // Forward targetOp to remote op - go func() { - success := false - var errors []remoteOperationResult - for _, serverURL := range urls { - req.Target.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) - - op, err := source.MigrateInstance(name, req) - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - continue - } - - rop.targetOp = op - - for _, handler := range rop.handlers { - _, _ = rop.targetOp.AddHandler(handler) - } - - err = rop.targetOp.Wait() - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - - if shared.IsConnectionError(err) { - continue - } - - break - } - - success = true - break - } - - if !success { - rop.err = remoteOperationError("Failed instance migration", errors) - } - - close(rop.chDone) - }() - - return &rop, nil -} - -// MigrateInstance requests that LXD prepares for a instance migration. 
-func (r *ProtocolLXD) MigrateInstance(name string, instance api.InstancePost) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - if instance.InstanceOnly || instance.ContainerOnly { - if !r.HasExtension("container_only_migration") { - return nil, fmt.Errorf("The server is missing the required \"container_only_migration\" API extension") - } - } - - if instance.Pool != "" && !r.HasExtension("instance_pool_move") { - return nil, fmt.Errorf("The server is missing the required \"instance_pool_move\" API extension") - } - - if instance.Project != "" && !r.HasExtension("instance_project_move") { - return nil, fmt.Errorf("The server is missing the required \"instance_project_move\" API extension") - } - - if instance.AllowInconsistent && !r.HasExtension("cluster_migration_inconsistent_copy") { - return nil, fmt.Errorf("The server is missing the required \"cluster_migration_inconsistent_copy\" API extension") - } - - // Quick check. - if !instance.Migration { - return nil, fmt.Errorf("Can't ask for a rename through MigrateInstance") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s", path, url.PathEscape(name)), instance, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// DeleteInstance requests that LXD deletes the instance. -func (r *ProtocolLXD) DeleteInstance(name string) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - // Send the request - op, _, err := r.queryOperation("DELETE", fmt.Sprintf("%s/%s", path, url.PathEscape(name)), nil, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// ExecInstance requests that LXD spawns a command inside the instance. 
-func (r *ProtocolLXD) ExecInstance(instanceName string, exec api.InstanceExecPost, args *InstanceExecArgs) (Operation, error) { - if exec.RecordOutput { - if !r.HasExtension("container_exec_recording") { - return nil, fmt.Errorf("The server is missing the required \"container_exec_recording\" API extension") - } - } - - if exec.User > 0 || exec.Group > 0 || exec.Cwd != "" { - if !r.HasExtension("container_exec_user_group_cwd") { - return nil, fmt.Errorf("The server is missing the required \"container_exec_user_group_cwd\" API extension") - } - } - - var uri string - - if r.IsAgent() { - uri = "/exec" - } else { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - uri = fmt.Sprintf("%s/%s/exec", path, url.PathEscape(instanceName)) - } - - // Send the request - op, _, err := r.queryOperation("POST", uri, exec, "") - if err != nil { - return nil, err - } - - opAPI := op.Get() - - // Process additional arguments - if args != nil { - // Parse the fds - fds := map[string]string{} - - value, ok := opAPI.Metadata["fds"] - if ok { - values := value.(map[string]any) - for k, v := range values { - fds[k] = v.(string) - } - } - - // Call the control handler with a connection to the control socket - if args.Control != nil && fds[api.SecretNameControl] != "" { - conn, err := r.GetOperationWebsocket(opAPI.ID, fds[api.SecretNameControl]) - if err != nil { - return nil, err - } - - go args.Control(conn) - } - - if exec.Interactive { - // Handle interactive sections - if args.Stdin != nil && args.Stdout != nil { - // Connect to the websocket - conn, err := r.GetOperationWebsocket(opAPI.ID, fds["0"]) - if err != nil { - return nil, err - } - - // And attach stdin and stdout to it - go func() { - shared.WebsocketSendStream(conn, args.Stdin, -1) - <-shared.WebsocketRecvStream(args.Stdout, conn) - _ = conn.Close() - - if args.DataDone != nil { - close(args.DataDone) - } - }() - } else { - if args.DataDone != nil { - 
close(args.DataDone) - } - } - } else { - // Handle non-interactive sessions - dones := map[int]chan bool{} - conns := []*websocket.Conn{} - - // Handle stdin - if fds["0"] != "" { - conn, err := r.GetOperationWebsocket(opAPI.ID, fds["0"]) - if err != nil { - return nil, err - } - - conns = append(conns, conn) - dones[0] = shared.WebsocketSendStream(conn, args.Stdin, -1) - } - - // Handle stdout - if fds["1"] != "" { - conn, err := r.GetOperationWebsocket(opAPI.ID, fds["1"]) - if err != nil { - return nil, err - } - - conns = append(conns, conn) - dones[1] = shared.WebsocketRecvStream(args.Stdout, conn) - } - - // Handle stderr - if fds["2"] != "" { - conn, err := r.GetOperationWebsocket(opAPI.ID, fds["2"]) - if err != nil { - return nil, err - } - - conns = append(conns, conn) - dones[2] = shared.WebsocketRecvStream(args.Stderr, conn) - } - - // Wait for everything to be done - go func() { - for i, chDone := range dones { - // Skip stdin, dealing with it separately below - if i == 0 { - continue - } - - <-chDone - } - - if fds["0"] != "" { - if args.Stdin != nil { - _ = args.Stdin.Close() - } - - // Empty the stdin channel but don't block on it as - // stdin may be stuck in Read() - go func() { - <-dones[0] - }() - } - - for _, conn := range conns { - _ = conn.Close() - } - - if args.DataDone != nil { - close(args.DataDone) - } - }() - } - } - - return op, nil -} - -// GetInstanceFile retrieves the provided path from the instance. 
-func (r *ProtocolLXD) GetInstanceFile(instanceName string, filePath string) (io.ReadCloser, *InstanceFileResponse, error) { - var err error - var requestURL string - - if r.IsAgent() { - requestURL, err = shared.URLEncode( - fmt.Sprintf("%s/1.0/files", r.httpBaseURL.String()), - map[string]string{"path": filePath}) - } else { - var path string - - path, _, err = r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, nil, err - } - - // Prepare the HTTP request - requestURL, err = shared.URLEncode( - fmt.Sprintf("%s/1.0%s/%s/files", r.httpBaseURL.String(), path, url.PathEscape(instanceName)), - map[string]string{"path": filePath}) - } - - if err != nil { - return nil, nil, err - } - - requestURL, err = r.setQueryAttributes(requestURL) - if err != nil { - return nil, nil, err - } - - req, err := http.NewRequest("GET", requestURL, nil) - if err != nil { - return nil, nil, err - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return nil, nil, err - } - - // Check the return value for a cleaner error - if resp.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(resp) - if err != nil { - return nil, nil, err - } - } - - // Parse the headers - uid, gid, mode, fileType, _ := shared.ParseLXDFileHeaders(resp.Header) - fileResp := InstanceFileResponse{ - UID: uid, - GID: gid, - Mode: mode, - Type: fileType, - } - - if fileResp.Type == "directory" { - // Decode the response - response := api.Response{} - decoder := json.NewDecoder(resp.Body) - - err = decoder.Decode(&response) - if err != nil { - return nil, nil, err - } - - // Get the file list - entries := []string{} - err = response.MetadataAsStruct(&entries) - if err != nil { - return nil, nil, err - } - - fileResp.Entries = entries - - return nil, &fileResp, err - } - - return resp.Body, &fileResp, err -} - -// CreateInstanceFile tells LXD to create a file in the instance. 
-func (r *ProtocolLXD) CreateInstanceFile(instanceName string, filePath string, args InstanceFileArgs) error { - if args.Type == "directory" { - if !r.HasExtension("directory_manipulation") { - return fmt.Errorf("The server is missing the required \"directory_manipulation\" API extension") - } - } - - if args.Type == "symlink" { - if !r.HasExtension("file_symlinks") { - return fmt.Errorf("The server is missing the required \"file_symlinks\" API extension") - } - } - - if args.WriteMode == "append" { - if !r.HasExtension("file_append") { - return fmt.Errorf("The server is missing the required \"file_append\" API extension") - } - } - - var requestURL string - - if r.IsAgent() { - requestURL = fmt.Sprintf("%s/1.0/files?path=%s", r.httpBaseURL.String(), url.QueryEscape(filePath)) - } else { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return err - } - - // Prepare the HTTP request - requestURL = fmt.Sprintf("%s/1.0%s/%s/files?path=%s", r.httpBaseURL.String(), path, url.PathEscape(instanceName), url.QueryEscape(filePath)) - } - - requestURL, err := r.setQueryAttributes(requestURL) - if err != nil { - return err - } - - req, err := http.NewRequest("POST", requestURL, args.Content) - if err != nil { - return err - } - - // Set the various headers - if args.UID > -1 { - req.Header.Set("X-LXD-uid", fmt.Sprintf("%d", args.UID)) - } - - if args.GID > -1 { - req.Header.Set("X-LXD-gid", fmt.Sprintf("%d", args.GID)) - } - - if args.Mode > -1 { - req.Header.Set("X-LXD-mode", fmt.Sprintf("%04o", args.Mode)) - } - - if args.Type != "" { - req.Header.Set("X-LXD-type", args.Type) - } - - if args.WriteMode != "" { - req.Header.Set("X-LXD-write", args.WriteMode) - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return err - } - - // Check the return value for a cleaner error - _, _, err = lxdParseResponse(resp) - if err != nil { - return err - } - - return nil -} - -// DeleteInstanceFile deletes a file in the instance. 
-func (r *ProtocolLXD) DeleteInstanceFile(instanceName string, filePath string) error { - if !r.HasExtension("file_delete") { - return fmt.Errorf("The server is missing the required \"file_delete\" API extension") - } - - var requestURL string - - if r.IsAgent() { - requestURL = fmt.Sprintf("/files?path=%s", url.QueryEscape(filePath)) - } else { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return err - } - - // Prepare the HTTP request - requestURL = fmt.Sprintf("%s/%s/files?path=%s", path, url.PathEscape(instanceName), url.QueryEscape(filePath)) - } - - requestURL, err := r.setQueryAttributes(requestURL) - if err != nil { - return err - } - - // Send the request - _, _, err = r.query("DELETE", requestURL, nil, "") - if err != nil { - return err - } - - return nil -} - -// rawSFTPConn connects to the apiURL, upgrades to an SFTP raw connection and returns it. -func (r *ProtocolLXD) rawSFTPConn(apiURL *url.URL) (net.Conn, error) { - // Get the HTTP transport. - httpTransport, err := r.getUnderlyingHTTPTransport() - if err != nil { - return nil, err - } - - req := &http.Request{ - Method: http.MethodGet, - URL: apiURL, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: apiURL.Host, - } - - req.Header["Upgrade"] = []string{"sftp"} - req.Header["Connection"] = []string{"Upgrade"} - - r.addClientHeaders(req) - - // Establish the connection. 
- var conn net.Conn - - if httpTransport.TLSClientConfig != nil { - conn, err = httpTransport.DialTLSContext(context.Background(), "tcp", apiURL.Host) - } else { - conn, err = httpTransport.DialContext(context.Background(), "tcp", apiURL.Host) - } - - if err != nil { - return nil, err - } - - remoteTCP, _ := tcp.ExtractConn(conn) - if remoteTCP != nil { - err = tcp.SetTimeouts(remoteTCP, 0) - if err != nil { - return nil, err - } - } - - err = req.Write(conn) - if err != nil { - return nil, err - } - - resp, err := http.ReadResponse(bufio.NewReader(conn), req) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusSwitchingProtocols { - _, _, err := lxdParseResponse(resp) - if err != nil { - return nil, err - } - } - - if resp.Header.Get("Upgrade") != "sftp" { - return nil, fmt.Errorf("Missing or unexpected Upgrade header in response") - } - - return conn, err -} - -// GetInstanceFileSFTPConn returns a connection to the instance's SFTP endpoint. -func (r *ProtocolLXD) GetInstanceFileSFTPConn(instanceName string) (net.Conn, error) { - apiURL := api.NewURL() - apiURL.URL = r.httpBaseURL // Preload the URL with the client base URL. - apiURL.Path("1.0", "instances", instanceName, "sftp") - r.setURLQueryAttributes(&apiURL.URL) - - return r.rawSFTPConn(&apiURL.URL) -} - -// GetInstanceFileSFTP returns an SFTP connection to the instance. -func (r *ProtocolLXD) GetInstanceFileSFTP(instanceName string) (*sftp.Client, error) { - conn, err := r.GetInstanceFileSFTPConn(instanceName) - if err != nil { - return nil, err - } - - // Get a SFTP client. - client, err := sftp.NewClientPipe(conn, conn) - if err != nil { - _ = conn.Close() - return nil, err - } - - go func() { - // Wait for the client to be done before closing the connection. - _ = client.Wait() - _ = conn.Close() - }() - - return client, nil -} - -// GetInstanceSnapshotNames returns a list of snapshot names for the instance. 
-func (r *ProtocolLXD) GetInstanceSnapshotNames(instanceName string) ([]string, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - // Fetch the raw URL values. - urls := []string{} - baseURL := fmt.Sprintf("%s/%s/snapshots", path, url.PathEscape(instanceName)) - _, err = r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetInstanceSnapshots returns a list of snapshots for the instance. -func (r *ProtocolLXD) GetInstanceSnapshots(instanceName string) ([]api.InstanceSnapshot, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - snapshots := []api.InstanceSnapshot{} - - // Fetch the raw value - _, err = r.queryStruct("GET", fmt.Sprintf("%s/%s/snapshots?recursion=1", path, url.PathEscape(instanceName)), nil, "", &snapshots) - if err != nil { - return nil, err - } - - return snapshots, nil -} - -// GetInstanceSnapshot returns a Snapshot struct for the provided instance and snapshot names. -func (r *ProtocolLXD) GetInstanceSnapshot(instanceName string, name string) (*api.InstanceSnapshot, string, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, "", err - } - - snapshot := api.InstanceSnapshot{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("%s/%s/snapshots/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), nil, "", &snapshot) - if err != nil { - return nil, "", err - } - - return &snapshot, etag, nil -} - -// CreateInstanceSnapshot requests that LXD creates a new snapshot for the instance. 
-func (r *ProtocolLXD) CreateInstanceSnapshot(instanceName string, snapshot api.InstanceSnapshotsPost) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - // Validate the request - if snapshot.ExpiresAt != nil && !r.HasExtension("snapshot_expiry_creation") { - return nil, fmt.Errorf("The server is missing the required \"snapshot_expiry_creation\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/snapshots", path, url.PathEscape(instanceName)), snapshot, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// CopyInstanceSnapshot copies a snapshot from a remote server into a new instance. Additional options can be passed using InstanceCopyArgs. -func (r *ProtocolLXD) CopyInstanceSnapshot(source InstanceServer, instanceName string, snapshot api.InstanceSnapshot, args *InstanceSnapshotCopyArgs) (RemoteOperation, error) { - // Backward compatibility (with broken Name field) - fields := strings.Split(snapshot.Name, shared.SnapshotDelimiter) - cName := instanceName - sName := fields[len(fields)-1] - - // Base request - req := api.InstancesPost{ - Name: cName, - InstancePut: api.InstancePut{ - Architecture: snapshot.Architecture, - Config: snapshot.Config, - Devices: snapshot.Devices, - Ephemeral: snapshot.Ephemeral, - Profiles: snapshot.Profiles, - }, - } - - if snapshot.Stateful && args.Live { - if !r.HasExtension("container_snapshot_stateful_migration") { - return nil, fmt.Errorf("The server is missing the required \"container_snapshot_stateful_migration\" API extension") - } - - req.InstancePut.Stateful = snapshot.Stateful - req.Source.Live = false // Snapshots are never running and so we don't need live migration. - } - - req.Source.BaseImage = snapshot.Config["volatile.base_image"] - - // Process the copy arguments - if args != nil { - // Quick checks. 
- if shared.StringInSlice(args.Mode, []string{"push", "relay"}) { - if !r.HasExtension("container_push") { - return nil, fmt.Errorf("The target server is missing the required \"container_push\" API extension") - } - - if !source.HasExtension("container_push") { - return nil, fmt.Errorf("The source server is missing the required \"container_push\" API extension") - } - } - - if args.Mode == "push" && !source.HasExtension("container_push_target") { - return nil, fmt.Errorf("The source server is missing the required \"container_push_target\" API extension") - } - - // Allow overriding the target name - if args.Name != "" { - req.Name = args.Name - } - } - - sourceInfo, err := source.GetConnectionInfo() - if err != nil { - return nil, fmt.Errorf("Failed to get source connection info: %w", err) - } - - destInfo, err := r.GetConnectionInfo() - if err != nil { - return nil, fmt.Errorf("Failed to get destination connection info: %w", err) - } - - instance, _, err := source.GetInstance(cName) - if err != nil { - return nil, fmt.Errorf("Failed to get instance info: %w", err) - } - - // Optimization for the local copy case - if destInfo.URL == sourceInfo.URL && destInfo.SocketPath == sourceInfo.SocketPath && (!r.IsClustered() || instance.Location == r.clusterTarget || r.HasExtension("cluster_internal_copy")) { - // Project handling - if destInfo.Project != sourceInfo.Project { - if !r.HasExtension("container_copy_project") { - return nil, fmt.Errorf("The server is missing the required \"container_copy_project\" API extension") - } - - req.Source.Project = sourceInfo.Project - } - - // Local copy source fields - req.Source.Type = "copy" - req.Source.Source = fmt.Sprintf("%s/%s", cName, sName) - - // Copy the instance - op, err := r.CreateInstance(req) - if err != nil { - return nil, err - } - - rop := remoteOperation{ - targetOp: op, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - rop.err = rop.targetOp.Wait() - close(rop.chDone) - }() - - 
return &rop, nil - } - - // If deadling with migration, we need to set the type. - if source.HasExtension("virtual-machines") { - inst, _, err := source.GetInstance(instanceName) - if err != nil { - return nil, err - } - - req.Type = api.InstanceType(inst.Type) - } - - // Source request - sourceReq := api.InstanceSnapshotPost{ - Migration: true, - Name: args.Name, - } - - if snapshot.Stateful && args.Live { - sourceReq.Live = args.Live - } - - // Push mode migration - if args != nil && args.Mode == "push" { - // Get target server connection information - info, err := r.GetConnectionInfo() - if err != nil { - return nil, err - } - - // Create the instance - req.Source.Type = "migration" - req.Source.Mode = "push" - - op, err := r.CreateInstance(req) - if err != nil { - return nil, err - } - - opAPI := op.Get() - - targetSecrets := map[string]string{} - for k, v := range opAPI.Metadata { - targetSecrets[k] = v.(string) - } - - // Prepare the source request - target := api.InstancePostTarget{} - target.Operation = opAPI.ID - target.Websockets = targetSecrets - target.Certificate = info.Certificate - sourceReq.Target = &target - - return r.tryMigrateInstanceSnapshot(source, cName, sName, sourceReq, info.Addresses) - } - - // Get source server connection information - info, err := source.GetConnectionInfo() - if err != nil { - return nil, err - } - - op, err := source.MigrateInstanceSnapshot(cName, sName, sourceReq) - if err != nil { - return nil, err - } - - opAPI := op.Get() - - sourceSecrets := map[string]string{} - for k, v := range opAPI.Metadata { - sourceSecrets[k] = v.(string) - } - - // Relay mode migration - if args != nil && args.Mode == "relay" { - // Push copy source fields - req.Source.Type = "migration" - req.Source.Mode = "push" - - // Start the process - targetOp, err := r.CreateInstance(req) - if err != nil { - return nil, err - } - - targetOpAPI := targetOp.Get() - - // Extract the websockets - targetSecrets := map[string]string{} - for k, v := range 
targetOpAPI.Metadata { - targetSecrets[k] = v.(string) - } - - // Launch the relay - err = r.proxyMigration(targetOp.(*operation), targetSecrets, source, op.(*operation), sourceSecrets) - if err != nil { - return nil, err - } - - // Prepare a tracking operation - rop := remoteOperation{ - targetOp: targetOp, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - rop.err = rop.targetOp.Wait() - close(rop.chDone) - }() - - return &rop, nil - } - - // Pull mode migration - req.Source.Type = "migration" - req.Source.Mode = "pull" - req.Source.Operation = opAPI.ID - req.Source.Websockets = sourceSecrets - req.Source.Certificate = info.Certificate - - return r.tryCreateInstance(req, info.Addresses, op) -} - -// RenameInstanceSnapshot requests that LXD renames the snapshot. -func (r *ProtocolLXD) RenameInstanceSnapshot(instanceName string, name string, instance api.InstanceSnapshotPost) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - // Quick check. 
- if instance.Migration { - return nil, fmt.Errorf("Can't ask for a migration through RenameInstanceSnapshot") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/snapshots/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), instance, "") - if err != nil { - return nil, err - } - - return op, nil -} - -func (r *ProtocolLXD) tryMigrateInstanceSnapshot(source InstanceServer, instanceName string, name string, req api.InstanceSnapshotPost, urls []string) (RemoteOperation, error) { - if len(urls) == 0 { - return nil, fmt.Errorf("The target server isn't listening on the network") - } - - rop := remoteOperation{ - chDone: make(chan bool), - } - - operation := req.Target.Operation - - // Forward targetOp to remote op - go func() { - success := false - var errors []remoteOperationResult - for _, serverURL := range urls { - req.Target.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) - - op, err := source.MigrateInstanceSnapshot(instanceName, name, req) - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - continue - } - - rop.targetOp = op - - for _, handler := range rop.handlers { - _, _ = rop.targetOp.AddHandler(handler) - } - - err = rop.targetOp.Wait() - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - - if shared.IsConnectionError(err) { - continue - } - - break - } - - success = true - break - } - - if !success { - rop.err = remoteOperationError("Failed instance migration", errors) - } - - close(rop.chDone) - }() - - return &rop, nil -} - -// MigrateInstanceSnapshot requests that LXD prepares for a snapshot migration. -func (r *ProtocolLXD) MigrateInstanceSnapshot(instanceName string, name string, instance api.InstanceSnapshotPost) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - // Quick check. 
- if !instance.Migration { - return nil, fmt.Errorf("Can't ask for a rename through MigrateInstanceSnapshot") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/snapshots/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), instance, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// DeleteInstanceSnapshot requests that LXD deletes the instance snapshot. -func (r *ProtocolLXD) DeleteInstanceSnapshot(instanceName string, name string) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - // Send the request - op, _, err := r.queryOperation("DELETE", fmt.Sprintf("%s/%s/snapshots/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), nil, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// UpdateInstanceSnapshot requests that LXD updates the instance snapshot. -func (r *ProtocolLXD) UpdateInstanceSnapshot(instanceName string, name string, instance api.InstanceSnapshotPut, ETag string) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - if !r.HasExtension("snapshot_expiry") { - return nil, fmt.Errorf("The server is missing the required \"snapshot_expiry\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("PUT", fmt.Sprintf("%s/%s/snapshots/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), instance, ETag) - if err != nil { - return nil, err - } - - return op, nil -} - -// GetInstanceState returns a InstanceState entry for the provided instance name. 
-func (r *ProtocolLXD) GetInstanceState(name string) (*api.InstanceState, string, error) { - var uri string - - if r.IsAgent() { - uri = "/state" - } else { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, "", err - } - - uri = fmt.Sprintf("%s/%s/state", path, url.PathEscape(name)) - } - - state := api.InstanceState{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", uri, nil, "", &state) - if err != nil { - return nil, "", err - } - - return &state, etag, nil -} - -// UpdateInstanceState updates the instance to match the requested state. -func (r *ProtocolLXD) UpdateInstanceState(name string, state api.InstanceStatePut, ETag string) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - // Send the request - op, _, err := r.queryOperation("PUT", fmt.Sprintf("%s/%s/state", path, url.PathEscape(name)), state, ETag) - if err != nil { - return nil, err - } - - return op, nil -} - -// GetInstanceLogfiles returns a list of logfiles for the instance. -func (r *ProtocolLXD) GetInstanceLogfiles(name string) ([]string, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - // Fetch the raw URL values. - urls := []string{} - baseURL := fmt.Sprintf("%s/%s/logs", path, url.PathEscape(name)) - _, err = r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetInstanceLogfile returns the content of the requested logfile. -// -// Note that it's the caller's responsibility to close the returned ReadCloser. 
-func (r *ProtocolLXD) GetInstanceLogfile(name string, filename string) (io.ReadCloser, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - // Prepare the HTTP request - url := fmt.Sprintf("%s/1.0%s/%s/logs/%s", r.httpBaseURL.String(), path, url.PathEscape(name), url.PathEscape(filename)) - - url, err = r.setQueryAttributes(url) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return nil, err - } - - // Check the return value for a cleaner error - if resp.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(resp) - if err != nil { - return nil, err - } - } - - return resp.Body, err -} - -// DeleteInstanceLogfile deletes the requested logfile. -func (r *ProtocolLXD) DeleteInstanceLogfile(name string, filename string) error { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return err - } - - // Send the request - _, _, err = r.query("DELETE", fmt.Sprintf("%s/%s/logs/%s", path, url.PathEscape(name), url.PathEscape(filename)), nil, "") - if err != nil { - return err - } - - return nil -} - -// GetInstanceMetadata returns instance metadata. -func (r *ProtocolLXD) GetInstanceMetadata(name string) (*api.ImageMetadata, string, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, "", err - } - - if !r.HasExtension("container_edit_metadata") { - return nil, "", fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") - } - - metadata := api.ImageMetadata{} - - url := fmt.Sprintf("%s/%s/metadata", path, url.PathEscape(name)) - etag, err := r.queryStruct("GET", url, nil, "", &metadata) - if err != nil { - return nil, "", err - } - - return &metadata, etag, err -} - -// UpdateInstanceMetadata sets the content of the instance metadata file. 
-func (r *ProtocolLXD) UpdateInstanceMetadata(name string, metadata api.ImageMetadata, ETag string) error { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return err - } - - if !r.HasExtension("container_edit_metadata") { - return fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") - } - - url := fmt.Sprintf("%s/%s/metadata", path, url.PathEscape(name)) - _, _, err = r.query("PUT", url, metadata, ETag) - if err != nil { - return err - } - - return nil -} - -// GetInstanceTemplateFiles returns the list of names of template files for a instance. -func (r *ProtocolLXD) GetInstanceTemplateFiles(instanceName string) ([]string, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - if !r.HasExtension("container_edit_metadata") { - return nil, fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") - } - - templates := []string{} - - url := fmt.Sprintf("%s/%s/metadata/templates", path, url.PathEscape(instanceName)) - _, err = r.queryStruct("GET", url, nil, "", &templates) - if err != nil { - return nil, err - } - - return templates, nil -} - -// GetInstanceTemplateFile returns the content of a template file for a instance. 
-func (r *ProtocolLXD) GetInstanceTemplateFile(instanceName string, templateName string) (io.ReadCloser, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - if !r.HasExtension("container_edit_metadata") { - return nil, fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") - } - - url := fmt.Sprintf("%s/1.0%s/%s/metadata/templates?path=%s", r.httpBaseURL.String(), path, url.PathEscape(instanceName), url.QueryEscape(templateName)) - - url, err = r.setQueryAttributes(url) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return nil, err - } - - // Check the return value for a cleaner error - if resp.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(resp) - if err != nil { - return nil, err - } - } - - return resp.Body, err -} - -// CreateInstanceTemplateFile creates an a template for a instance. 
-func (r *ProtocolLXD) CreateInstanceTemplateFile(instanceName string, templateName string, content io.ReadSeeker) error { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return err - } - - if !r.HasExtension("container_edit_metadata") { - return fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") - } - - url := fmt.Sprintf("%s/1.0%s/%s/metadata/templates?path=%s", r.httpBaseURL.String(), path, url.PathEscape(instanceName), url.QueryEscape(templateName)) - - url, err = r.setQueryAttributes(url) - if err != nil { - return err - } - - req, err := http.NewRequest("POST", url, content) - if err != nil { - return err - } - - req.Header.Set("Content-Type", "application/octet-stream") - - // Send the request - resp, err := r.DoHTTP(req) - // Check the return value for a cleaner error - if resp.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(resp) - if err != nil { - return err - } - } - return err -} - -// DeleteInstanceTemplateFile deletes a template file for a instance. -func (r *ProtocolLXD) DeleteInstanceTemplateFile(name string, templateName string) error { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return err - } - - if !r.HasExtension("container_edit_metadata") { - return fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") - } - - _, _, err = r.query("DELETE", fmt.Sprintf("%s/%s/metadata/templates?path=%s", path, url.PathEscape(name), url.QueryEscape(templateName)), nil, "") - return err -} - -// ConsoleInstance requests that LXD attaches to the console device of a instance. 
-func (r *ProtocolLXD) ConsoleInstance(instanceName string, console api.InstanceConsolePost, args *InstanceConsoleArgs) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - if !r.HasExtension("console") { - return nil, fmt.Errorf("The server is missing the required \"console\" API extension") - } - - if console.Type == "" { - console.Type = "console" - } - - if console.Type == "vga" && !r.HasExtension("console_vga_type") { - return nil, fmt.Errorf("The server is missing the required \"console_vga_type\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/console", path, url.PathEscape(instanceName)), console, "") - if err != nil { - return nil, err - } - - opAPI := op.Get() - - if args == nil || args.Terminal == nil { - return nil, fmt.Errorf("A terminal must be set") - } - - if args.Control == nil { - return nil, fmt.Errorf("A control channel must be set") - } - - // Parse the fds - fds := map[string]string{} - - value, ok := opAPI.Metadata["fds"] - if ok { - values := value.(map[string]any) - for k, v := range values { - fds[k] = v.(string) - } - } - - var controlConn *websocket.Conn - // Call the control handler with a connection to the control socket - if fds[api.SecretNameControl] == "" { - return nil, fmt.Errorf("Did not receive a file descriptor for the control channel") - } - - controlConn, err = r.GetOperationWebsocket(opAPI.ID, fds[api.SecretNameControl]) - if err != nil { - return nil, err - } - - go args.Control(controlConn) - - // Connect to the websocket - conn, err := r.GetOperationWebsocket(opAPI.ID, fds["0"]) - if err != nil { - return nil, err - } - - // Detach from console. - go func(consoleDisconnect <-chan bool) { - <-consoleDisconnect - msg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "Detaching from console") - // We don't care if this fails. This is just for convenience. 
- _ = controlConn.WriteMessage(websocket.CloseMessage, msg) - _ = controlConn.Close() - }(args.ConsoleDisconnect) - - // And attach stdin and stdout to it - go func() { - shared.WebsocketSendStream(conn, args.Terminal, -1) - <-shared.WebsocketRecvStream(args.Terminal, conn) - _ = conn.Close() - }() - - return op, nil -} - -// ConsoleInstanceDynamic requests that LXD attaches to the console device of a -// instance with the possibility of opening multiple connections to it. -// -// Every time the returned 'console' function is called, a new connection will -// be established and proxied to the given io.ReadWriteCloser. -func (r *ProtocolLXD) ConsoleInstanceDynamic(instanceName string, console api.InstanceConsolePost, args *InstanceConsoleArgs) (Operation, func(io.ReadWriteCloser) error, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, nil, err - } - - if !r.HasExtension("console") { - return nil, nil, fmt.Errorf("The server is missing the required \"console\" API extension") - } - - if console.Type == "" { - console.Type = "console" - } - - if console.Type == "vga" && !r.HasExtension("console_vga_type") { - return nil, nil, fmt.Errorf("The server is missing the required \"console_vga_type\" API extension") - } - - // Send the request. - op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/console", path, url.PathEscape(instanceName)), console, "") - if err != nil { - return nil, nil, err - } - - opAPI := op.Get() - - if args == nil { - return nil, nil, fmt.Errorf("No arguments provided") - } - - if args.Control == nil { - return nil, nil, fmt.Errorf("A control channel must be set") - } - - // Parse the fds. - fds := map[string]string{} - - value, ok := opAPI.Metadata["fds"] - if ok { - values := value.(map[string]any) - for k, v := range values { - fds[k] = v.(string) - } - } - - // Call the control handler with a connection to the control socket. 
- if fds[api.SecretNameControl] == "" { - return nil, nil, fmt.Errorf("Did not receive a file descriptor for the control channel") - } - - controlConn, err := r.GetOperationWebsocket(opAPI.ID, fds[api.SecretNameControl]) - if err != nil { - return nil, nil, err - } - - go args.Control(controlConn) - - // Handle main disconnect. - go func(consoleDisconnect <-chan bool) { - <-consoleDisconnect - msg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "Detaching from console") - // We don't care if this fails. This is just for convenience. - _ = controlConn.WriteMessage(websocket.CloseMessage, msg) - _ = controlConn.Close() - }(args.ConsoleDisconnect) - - f := func(rwc io.ReadWriteCloser) error { - // Connect to the websocket. - conn, err := r.GetOperationWebsocket(opAPI.ID, fds["0"]) - if err != nil { - return err - } - - // Attach reader/writer. - shared.WebsocketSendStream(conn, rwc, -1) - <-shared.WebsocketRecvStream(rwc, conn) - _ = conn.Close() - - return nil - } - - return op, f, nil -} - -// GetInstanceConsoleLog requests that LXD attaches to the console device of a instance. -// -// Note that it's the caller's responsibility to close the returned ReadCloser. 
-func (r *ProtocolLXD) GetInstanceConsoleLog(instanceName string, args *InstanceConsoleLogArgs) (io.ReadCloser, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - if !r.HasExtension("console") { - return nil, fmt.Errorf("The server is missing the required \"console\" API extension") - } - - // Prepare the HTTP request - url := fmt.Sprintf("%s/1.0%s/%s/console", r.httpBaseURL.String(), path, url.PathEscape(instanceName)) - - url, err = r.setQueryAttributes(url) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return nil, err - } - - // Check the return value for a cleaner error - if resp.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(resp) - if err != nil { - return nil, err - } - } - - return resp.Body, err -} - -// DeleteInstanceConsoleLog deletes the requested instance's console log. -func (r *ProtocolLXD) DeleteInstanceConsoleLog(instanceName string, args *InstanceConsoleLogArgs) error { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return err - } - - if !r.HasExtension("console") { - return fmt.Errorf("The server is missing the required \"console\" API extension") - } - - // Send the request - _, _, err = r.query("DELETE", fmt.Sprintf("%s/%s/console", path, url.PathEscape(instanceName)), nil, "") - if err != nil { - return err - } - - return nil -} - -// GetInstanceBackupNames returns a list of backup names for the instance. -func (r *ProtocolLXD) GetInstanceBackupNames(instanceName string) ([]string, error) { - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - // Fetch the raw URL values. 
- urls := []string{} - baseURL := fmt.Sprintf("%s/%s/backups", path, url.PathEscape(instanceName)) - _, err = r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetInstanceBackups returns a list of backups for the instance. -func (r *ProtocolLXD) GetInstanceBackups(instanceName string) ([]api.InstanceBackup, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - // Fetch the raw value - backups := []api.InstanceBackup{} - - _, err = r.queryStruct("GET", fmt.Sprintf("%s/%s/backups?recursion=1", path, url.PathEscape(instanceName)), nil, "", &backups) - if err != nil { - return nil, err - } - - return backups, nil -} - -// GetInstanceBackup returns a Backup struct for the provided instance and backup names. -func (r *ProtocolLXD) GetInstanceBackup(instanceName string, name string) (*api.InstanceBackup, string, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, "", err - } - - if !r.HasExtension("container_backup") { - return nil, "", fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - // Fetch the raw value - backup := api.InstanceBackup{} - etag, err := r.queryStruct("GET", fmt.Sprintf("%s/%s/backups/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), nil, "", &backup) - if err != nil { - return nil, "", err - } - - return &backup, etag, nil -} - -// CreateInstanceBackup requests that LXD creates a new backup for the instance. 
-func (r *ProtocolLXD) CreateInstanceBackup(instanceName string, backup api.InstanceBackupsPost) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/backups", path, url.PathEscape(instanceName)), backup, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// RenameInstanceBackup requests that LXD renames the backup. -func (r *ProtocolLXD) RenameInstanceBackup(instanceName string, name string, backup api.InstanceBackupPost) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/backups/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), backup, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// DeleteInstanceBackup requests that LXD deletes the instance backup. -func (r *ProtocolLXD) DeleteInstanceBackup(instanceName string, name string) (Operation, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("DELETE", fmt.Sprintf("%s/%s/backups/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), nil, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// GetInstanceBackupFile requests the instance backup content. 
-func (r *ProtocolLXD) GetInstanceBackupFile(instanceName string, name string, req *BackupFileRequest) (*BackupFileResponse, error) { - path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) - if err != nil { - return nil, err - } - - if !r.HasExtension("container_backup") { - return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") - } - - // Build the URL - uri := fmt.Sprintf("%s/1.0%s/%s/backups/%s/export", r.httpBaseURL.String(), path, url.PathEscape(instanceName), url.PathEscape(name)) - if r.project != "" { - uri += fmt.Sprintf("?project=%s", url.QueryEscape(r.project)) - } - - // Prepare the download request - request, err := http.NewRequest("GET", uri, nil) - if err != nil { - return nil, err - } - - if r.httpUserAgent != "" { - request.Header.Set("User-Agent", r.httpUserAgent) - } - - // Start the request - response, doneCh, err := cancel.CancelableDownload(req.Canceler, r.http, request) - if err != nil { - return nil, err - } - - defer func() { _ = response.Body.Close() }() - defer close(doneCh) - - if response.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(response) - if err != nil { - return nil, err - } - } - - // Handle the data - body := response.Body - if req.ProgressHandler != nil { - body = &ioprogress.ProgressReader{ - ReadCloser: response.Body, - Tracker: &ioprogress.ProgressTracker{ - Length: response.ContentLength, - Handler: func(percent int64, speed int64) { - req.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("%d%% (%s/s)", percent, units.GetByteSizeString(speed, 2))}) - }, - }, - } - } - - size, err := io.Copy(req.BackupFile, body) - if err != nil { - return nil, err - } - - resp := BackupFileResponse{} - resp.Size = size - - return &resp, nil -} - -func (r *ProtocolLXD) proxyMigration(targetOp *operation, targetSecrets map[string]string, source InstanceServer, sourceOp *operation, sourceSecrets map[string]string) error { - // Quick checks. 
- for n := range targetSecrets { - _, ok := sourceSecrets[n] - if !ok { - return fmt.Errorf("Migration target expects the \"%s\" socket but source isn't providing it", n) - } - } - - if targetSecrets[api.SecretNameControl] == "" { - return fmt.Errorf("Migration target didn't setup the required \"control\" socket") - } - - // Struct used to hold everything together - type proxy struct { - done chan struct{} - sourceConn *websocket.Conn - targetConn *websocket.Conn - } - - proxies := map[string]*proxy{} - - // Connect the control socket - sourceConn, err := source.GetOperationWebsocket(sourceOp.ID, sourceSecrets[api.SecretNameControl]) - if err != nil { - return err - } - - targetConn, err := r.GetOperationWebsocket(targetOp.ID, targetSecrets[api.SecretNameControl]) - if err != nil { - return err - } - - proxies[api.SecretNameControl] = &proxy{ - done: shared.WebsocketProxy(sourceConn, targetConn), - sourceConn: sourceConn, - targetConn: targetConn, - } - - // Connect the data sockets - for name := range sourceSecrets { - if name == api.SecretNameControl { - continue - } - - // Handle resets (used for multiple objects) - sourceConn, err := source.GetOperationWebsocket(sourceOp.ID, sourceSecrets[name]) - if err != nil { - break - } - - targetConn, err := r.GetOperationWebsocket(targetOp.ID, targetSecrets[name]) - if err != nil { - break - } - - proxies[name] = &proxy{ - sourceConn: sourceConn, - targetConn: targetConn, - done: shared.WebsocketProxy(sourceConn, targetConn), - } - } - - // Cleanup once everything is done - go func() { - // Wait for control socket - <-proxies[api.SecretNameControl].done - _ = proxies[api.SecretNameControl].sourceConn.Close() - _ = proxies[api.SecretNameControl].targetConn.Close() - - // Then deal with the others - for name, proxy := range proxies { - if name == api.SecretNameControl { - continue - } - - <-proxy.done - _ = proxy.sourceConn.Close() - _ = proxy.targetConn.Close() - } - }() - - return nil -} diff --git 
a/vendor/github.com/lxc/lxd/client/lxd_network_acls.go b/vendor/github.com/lxc/lxd/client/lxd_network_acls.go deleted file mode 100644 index f5166d9c..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_network_acls.go +++ /dev/null @@ -1,159 +0,0 @@ -package lxd - -import ( - "fmt" - "io" - "net/http" - "net/url" - - "github.com/lxc/lxd/shared/api" -) - -// GetNetworkACLNames returns a list of network ACL names. -func (r *ProtocolLXD) GetNetworkACLNames() ([]string, error) { - if !r.HasExtension("network_acl") { - return nil, fmt.Errorf(`The server is missing the required "network_acl" API extension`) - } - - // Fetch the raw URL values. - urls := []string{} - baseURL := "/network-acls" - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetNetworkACLs returns a list of Network ACL structs. -func (r *ProtocolLXD) GetNetworkACLs() ([]api.NetworkACL, error) { - if !r.HasExtension("network_acl") { - return nil, fmt.Errorf(`The server is missing the required "network_acl" API extension`) - } - - acls := []api.NetworkACL{} - - // Fetch the raw value. - _, err := r.queryStruct("GET", "/network-acls?recursion=1", nil, "", &acls) - if err != nil { - return nil, err - } - - return acls, nil -} - -// GetNetworkACL returns a Network ACL entry for the provided name. -func (r *ProtocolLXD) GetNetworkACL(name string) (*api.NetworkACL, string, error) { - if !r.HasExtension("network_acl") { - return nil, "", fmt.Errorf(`The server is missing the required "network_acl" API extension`) - } - - acl := api.NetworkACL{} - - // Fetch the raw value. - etag, err := r.queryStruct("GET", fmt.Sprintf("/network-acls/%s", url.PathEscape(name)), nil, "", &acl) - if err != nil { - return nil, "", err - } - - return &acl, etag, nil -} - -// GetNetworkACLLogfile returns a reader for the ACL log file. 
-// -// Note that it's the caller's responsibility to close the returned ReadCloser. -func (r *ProtocolLXD) GetNetworkACLLogfile(name string) (io.ReadCloser, error) { - if !r.HasExtension("network_acl_log") { - return nil, fmt.Errorf(`The server is missing the required "network_acl_log" API extension`) - } - - // Prepare the HTTP request - url := fmt.Sprintf("%s/1.0/network-acls/%s/log", r.httpBaseURL.String(), url.PathEscape(name)) - url, err := r.setQueryAttributes(url) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - - // Send the request - resp, err := r.DoHTTP(req) - if err != nil { - return nil, err - } - - // Check the return value for a cleaner error - if resp.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(resp) - if err != nil { - return nil, err - } - } - - return resp.Body, err -} - -// CreateNetworkACL defines a new network ACL using the provided struct. -func (r *ProtocolLXD) CreateNetworkACL(acl api.NetworkACLsPost) error { - if !r.HasExtension("network_acl") { - return fmt.Errorf(`The server is missing the required "network_acl" API extension`) - } - - // Send the request. - _, _, err := r.query("POST", "/network-acls", acl, "") - if err != nil { - return err - } - - return nil -} - -// UpdateNetworkACL updates the network ACL to match the provided struct. -func (r *ProtocolLXD) UpdateNetworkACL(name string, acl api.NetworkACLPut, ETag string) error { - if !r.HasExtension("network_acl") { - return fmt.Errorf(`The server is missing the required "network_acl" API extension`) - } - - // Send the request. - _, _, err := r.query("PUT", fmt.Sprintf("/network-acls/%s", url.PathEscape(name)), acl, ETag) - if err != nil { - return err - } - - return nil -} - -// RenameNetworkACL renames an existing network ACL entry. 
-func (r *ProtocolLXD) RenameNetworkACL(name string, acl api.NetworkACLPost) error { - if !r.HasExtension("network_acl") { - return fmt.Errorf(`The server is missing the required "network_acl" API extension`) - } - - // Send the request. - _, _, err := r.query("POST", fmt.Sprintf("/network-acls/%s", url.PathEscape(name)), acl, "") - if err != nil { - return err - } - - return nil -} - -// DeleteNetworkACL deletes an existing network ACL. -func (r *ProtocolLXD) DeleteNetworkACL(name string) error { - if !r.HasExtension("network_acl") { - return fmt.Errorf(`The server is missing the required "network_acl" API extension`) - } - - // Send the request. - _, _, err := r.query("DELETE", fmt.Sprintf("/network-acls/%s", url.PathEscape(name)), nil, "") - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_network_forwards.go b/vendor/github.com/lxc/lxd/client/lxd_network_forwards.go deleted file mode 100644 index 22f1f8fc..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_network_forwards.go +++ /dev/null @@ -1,105 +0,0 @@ -package lxd - -import ( - "fmt" - "net/url" - - "github.com/lxc/lxd/shared/api" -) - -// GetNetworkForwardAddresses returns a list of network forward listen addresses. -func (r *ProtocolLXD) GetNetworkForwardAddresses(networkName string) ([]string, error) { - if !r.HasExtension("network_forward") { - return nil, fmt.Errorf(`The server is missing the required "network_forward" API extension`) - } - - // Fetch the raw URL values. - urls := []string{} - baseURL := fmt.Sprintf("/networks/%s/forwards", url.PathEscape(networkName)) - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetNetworkForwards returns a list of Network forward structs. 
-func (r *ProtocolLXD) GetNetworkForwards(networkName string) ([]api.NetworkForward, error) { - if !r.HasExtension("network_forward") { - return nil, fmt.Errorf(`The server is missing the required "network_forward" API extension`) - } - - forwards := []api.NetworkForward{} - - // Fetch the raw value. - _, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s/forwards?recursion=1", url.PathEscape(networkName)), nil, "", &forwards) - if err != nil { - return nil, err - } - - return forwards, nil -} - -// GetNetworkForward returns a Network forward entry for the provided network and listen address. -func (r *ProtocolLXD) GetNetworkForward(networkName string, listenAddress string) (*api.NetworkForward, string, error) { - if !r.HasExtension("network_forward") { - return nil, "", fmt.Errorf(`The server is missing the required "network_forward" API extension`) - } - - forward := api.NetworkForward{} - - // Fetch the raw value. - etag, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s/forwards/%s", url.PathEscape(networkName), url.PathEscape(listenAddress)), nil, "", &forward) - if err != nil { - return nil, "", err - } - - return &forward, etag, nil -} - -// CreateNetworkForward defines a new network forward using the provided struct. -func (r *ProtocolLXD) CreateNetworkForward(networkName string, forward api.NetworkForwardsPost) error { - if !r.HasExtension("network_forward") { - return fmt.Errorf(`The server is missing the required "network_forward" API extension`) - } - - // Send the request. - _, _, err := r.query("POST", fmt.Sprintf("/networks/%s/forwards", url.PathEscape(networkName)), forward, "") - if err != nil { - return err - } - - return nil -} - -// UpdateNetworkForward updates the network forward to match the provided struct. 
-func (r *ProtocolLXD) UpdateNetworkForward(networkName string, listenAddress string, forward api.NetworkForwardPut, ETag string) error { - if !r.HasExtension("network_forward") { - return fmt.Errorf(`The server is missing the required "network_forward" API extension`) - } - - // Send the request. - _, _, err := r.query("PUT", fmt.Sprintf("/networks/%s/forwards/%s", url.PathEscape(networkName), url.PathEscape(listenAddress)), forward, ETag) - if err != nil { - return err - } - - return nil -} - -// DeleteNetworkForward deletes an existing network forward. -func (r *ProtocolLXD) DeleteNetworkForward(networkName string, listenAddress string) error { - if !r.HasExtension("network_forward") { - return fmt.Errorf(`The server is missing the required "network_forward" API extension`) - } - - // Send the request. - _, _, err := r.query("DELETE", fmt.Sprintf("/networks/%s/forwards/%s", url.PathEscape(networkName), url.PathEscape(listenAddress)), nil, "") - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_network_load_balancers.go b/vendor/github.com/lxc/lxd/client/lxd_network_load_balancers.go deleted file mode 100644 index 147720c0..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_network_load_balancers.go +++ /dev/null @@ -1,113 +0,0 @@ -package lxd - -import ( - "github.com/lxc/lxd/shared/api" -) - -// GetNetworkLoadBalancerAddresses returns a list of network load balancer listen addresses. -func (r *ProtocolLXD) GetNetworkLoadBalancerAddresses(networkName string) ([]string, error) { - err := r.CheckExtension("network_load_balancer") - if err != nil { - return nil, err - } - - // Fetch the raw URL values. - urls := []string{} - u := api.NewURL().Path("networks", networkName, "load-balancers") - _, err = r.queryStruct("GET", u.String(), nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(u.String(), urls...) 
-} - -// GetNetworkLoadBalancers returns a list of Network load balancer structs. -func (r *ProtocolLXD) GetNetworkLoadBalancers(networkName string) ([]api.NetworkLoadBalancer, error) { - err := r.CheckExtension("network_load_balancer") - if err != nil { - return nil, err - } - - loadBalancers := []api.NetworkLoadBalancer{} - - // Fetch the raw value. - u := api.NewURL().Path("networks", networkName, "load-balancers").WithQuery("recursion", "1") - _, err = r.queryStruct("GET", u.String(), nil, "", &loadBalancers) - if err != nil { - return nil, err - } - - return loadBalancers, nil -} - -// GetNetworkLoadBalancer returns a Network load balancer entry for the provided network and listen address. -func (r *ProtocolLXD) GetNetworkLoadBalancer(networkName string, listenAddress string) (*api.NetworkLoadBalancer, string, error) { - err := r.CheckExtension("network_load_balancer") - if err != nil { - return nil, "", err - } - - loadBalancer := api.NetworkLoadBalancer{} - - // Fetch the raw value. - u := api.NewURL().Path("networks", networkName, "load-balancers", listenAddress) - etag, err := r.queryStruct("GET", u.String(), nil, "", &loadBalancer) - if err != nil { - return nil, "", err - } - - return &loadBalancer, etag, nil -} - -// CreateNetworkLoadBalancer defines a new network load balancer using the provided struct. -func (r *ProtocolLXD) CreateNetworkLoadBalancer(networkName string, loadBalancer api.NetworkLoadBalancersPost) error { - err := r.CheckExtension("network_load_balancer") - if err != nil { - return err - } - - // Send the request. - u := api.NewURL().Path("networks", networkName, "load-balancers") - _, _, err = r.query("POST", u.String(), loadBalancer, "") - if err != nil { - return err - } - - return nil -} - -// UpdateNetworkLoadBalancer updates the network load balancer to match the provided struct. 
-func (r *ProtocolLXD) UpdateNetworkLoadBalancer(networkName string, listenAddress string, loadBalancer api.NetworkLoadBalancerPut, ETag string) error { - err := r.CheckExtension("network_load_balancer") - if err != nil { - return err - } - - // Send the request. - u := api.NewURL().Path("networks", networkName, "load-balancers", listenAddress) - _, _, err = r.query("PUT", u.String(), loadBalancer, ETag) - if err != nil { - return err - } - - return nil -} - -// DeleteNetworkLoadBalancer deletes an existing network load balancer. -func (r *ProtocolLXD) DeleteNetworkLoadBalancer(networkName string, listenAddress string) error { - err := r.CheckExtension("network_load_balancer") - if err != nil { - return err - } - - // Send the request. - u := api.NewURL().Path("networks", networkName, "load-balancers", listenAddress) - _, _, err = r.query("DELETE", u.String(), nil, "") - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_network_peer.go b/vendor/github.com/lxc/lxd/client/lxd_network_peer.go deleted file mode 100644 index 80423683..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_network_peer.go +++ /dev/null @@ -1,106 +0,0 @@ -package lxd - -import ( - "fmt" - "net/url" - - "github.com/lxc/lxd/shared/api" -) - -// GetNetworkPeerNames returns a list of network peer names. -func (r *ProtocolLXD) GetNetworkPeerNames(networkName string) ([]string, error) { - if !r.HasExtension("network_peer") { - return nil, fmt.Errorf(`The server is missing the required "network_peer" API extension`) - } - - // Fetch the raw URL values. - urls := []string{} - baseURL := fmt.Sprintf("/networks/%s/peers", url.PathEscape(networkName)) - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetNetworkPeers returns a list of network peer structs. 
-func (r *ProtocolLXD) GetNetworkPeers(networkName string) ([]api.NetworkPeer, error) { - if !r.HasExtension("network_peer") { - return nil, fmt.Errorf(`The server is missing the required "network_peer" API extension`) - } - - peers := []api.NetworkPeer{} - - // Fetch the raw value. - _, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s/peers?recursion=1", url.PathEscape(networkName)), nil, "", &peers) - if err != nil { - return nil, err - } - - return peers, nil -} - -// GetNetworkPeer returns a network peer entry for the provided network and peer name. -func (r *ProtocolLXD) GetNetworkPeer(networkName string, peerName string) (*api.NetworkPeer, string, error) { - if !r.HasExtension("network_peer") { - return nil, "", fmt.Errorf(`The server is missing the required "network_peer" API extension`) - } - - peer := api.NetworkPeer{} - - // Fetch the raw value. - etag, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s/peers/%s", url.PathEscape(networkName), url.PathEscape(peerName)), nil, "", &peer) - if err != nil { - return nil, "", err - } - - return &peer, etag, nil -} - -// CreateNetworkPeer defines a new network peer using the provided struct. -// Returns true if the peer connection has been mutually created. Returns false if peering has been only initiated. -func (r *ProtocolLXD) CreateNetworkPeer(networkName string, peer api.NetworkPeersPost) error { - if !r.HasExtension("network_peer") { - return fmt.Errorf(`The server is missing the required "network_peer" API extension`) - } - - // Send the request. - _, _, err := r.query("POST", fmt.Sprintf("/networks/%s/peers", url.PathEscape(networkName)), peer, "") - if err != nil { - return err - } - - return nil -} - -// UpdateNetworkPeer updates the network peer to match the provided struct. 
-func (r *ProtocolLXD) UpdateNetworkPeer(networkName string, peerName string, peer api.NetworkPeerPut, ETag string) error { - if !r.HasExtension("network_peer") { - return fmt.Errorf(`The server is missing the required "network_peer" API extension`) - } - - // Send the request. - _, _, err := r.query("PUT", fmt.Sprintf("/networks/%s/peers/%s", url.PathEscape(networkName), url.PathEscape(peerName)), peer, ETag) - if err != nil { - return err - } - - return nil -} - -// DeleteNetworkPeer deletes an existing network peer. -func (r *ProtocolLXD) DeleteNetworkPeer(networkName string, peerName string) error { - if !r.HasExtension("network_peer") { - return fmt.Errorf(`The server is missing the required "network_peer" API extension`) - } - - // Send the request. - _, _, err := r.query("DELETE", fmt.Sprintf("/networks/%s/peers/%s", url.PathEscape(networkName), url.PathEscape(peerName)), nil, "") - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_network_zones.go b/vendor/github.com/lxc/lxd/client/lxd_network_zones.go deleted file mode 100644 index 50a62b2d..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_network_zones.go +++ /dev/null @@ -1,202 +0,0 @@ -package lxd - -import ( - "fmt" - "net/url" - - "github.com/lxc/lxd/shared/api" -) - -// GetNetworkZoneNames returns a list of network zone names. -func (r *ProtocolLXD) GetNetworkZoneNames() ([]string, error) { - if !r.HasExtension("network_dns") { - return nil, fmt.Errorf(`The server is missing the required "network_dns" API extension`) - } - - // Fetch the raw URL values. - urls := []string{} - baseURL := "/network-zones" - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetNetworkZones returns a list of Network zone structs. 
-func (r *ProtocolLXD) GetNetworkZones() ([]api.NetworkZone, error) { - if !r.HasExtension("network_dns") { - return nil, fmt.Errorf(`The server is missing the required "network_dns" API extension`) - } - - zones := []api.NetworkZone{} - - // Fetch the raw value. - _, err := r.queryStruct("GET", "/network-zones?recursion=1", nil, "", &zones) - if err != nil { - return nil, err - } - - return zones, nil -} - -// GetNetworkZone returns a Network zone entry for the provided name. -func (r *ProtocolLXD) GetNetworkZone(name string) (*api.NetworkZone, string, error) { - if !r.HasExtension("network_dns") { - return nil, "", fmt.Errorf(`The server is missing the required "network_dns" API extension`) - } - - zone := api.NetworkZone{} - - // Fetch the raw value. - etag, err := r.queryStruct("GET", fmt.Sprintf("/network-zones/%s", url.PathEscape(name)), nil, "", &zone) - if err != nil { - return nil, "", err - } - - return &zone, etag, nil -} - -// CreateNetworkZone defines a new Network zone using the provided struct. -func (r *ProtocolLXD) CreateNetworkZone(zone api.NetworkZonesPost) error { - if !r.HasExtension("network_dns") { - return fmt.Errorf(`The server is missing the required "network_dns" API extension`) - } - - // Send the request. - _, _, err := r.query("POST", "/network-zones", zone, "") - if err != nil { - return err - } - - return nil -} - -// UpdateNetworkZone updates the network zone to match the provided struct. -func (r *ProtocolLXD) UpdateNetworkZone(name string, zone api.NetworkZonePut, ETag string) error { - if !r.HasExtension("network_dns") { - return fmt.Errorf(`The server is missing the required "network_dns" API extension`) - } - - // Send the request. - _, _, err := r.query("PUT", fmt.Sprintf("/network-zones/%s", url.PathEscape(name)), zone, ETag) - if err != nil { - return err - } - - return nil -} - -// DeleteNetworkZone deletes an existing network zone. 
-func (r *ProtocolLXD) DeleteNetworkZone(name string) error { - if !r.HasExtension("network_dns") { - return fmt.Errorf(`The server is missing the required "network_dns" API extension`) - } - - // Send the request. - _, _, err := r.query("DELETE", fmt.Sprintf("/network-zones/%s", url.PathEscape(name)), nil, "") - if err != nil { - return err - } - - return nil -} - -// GetNetworkZoneRecordNames returns a list of network zone record names. -func (r *ProtocolLXD) GetNetworkZoneRecordNames(zone string) ([]string, error) { - if !r.HasExtension("network_dns_records") { - return nil, fmt.Errorf(`The server is missing the required "network_dns_records" API extension`) - } - - // Fetch the raw URL values. - urls := []string{} - baseURL := fmt.Sprintf("/network-zones/%s/records", url.PathEscape(zone)) - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetNetworkZoneRecords returns a list of Network zone record structs. -func (r *ProtocolLXD) GetNetworkZoneRecords(zone string) ([]api.NetworkZoneRecord, error) { - if !r.HasExtension("network_dns_records") { - return nil, fmt.Errorf(`The server is missing the required "network_dns_records" API extension`) - } - - records := []api.NetworkZoneRecord{} - - // Fetch the raw value. - _, err := r.queryStruct("GET", fmt.Sprintf("/network-zones/%s/records?recursion=1", url.PathEscape(zone)), nil, "", &records) - if err != nil { - return nil, err - } - - return records, nil -} - -// GetNetworkZoneRecord returns a Network zone record entry for the provided zone and name. -func (r *ProtocolLXD) GetNetworkZoneRecord(zone string, name string) (*api.NetworkZoneRecord, string, error) { - if !r.HasExtension("network_dns_records") { - return nil, "", fmt.Errorf(`The server is missing the required "network_dns_records" API extension`) - } - - record := api.NetworkZoneRecord{} - - // Fetch the raw value. 
- etag, err := r.queryStruct("GET", fmt.Sprintf("/network-zones/%s/records/%s", url.PathEscape(zone), url.PathEscape(name)), nil, "", &record) - if err != nil { - return nil, "", err - } - - return &record, etag, nil -} - -// CreateNetworkZoneRecord defines a new Network zone record using the provided struct. -func (r *ProtocolLXD) CreateNetworkZoneRecord(zone string, record api.NetworkZoneRecordsPost) error { - if !r.HasExtension("network_dns_records") { - return fmt.Errorf(`The server is missing the required "network_dns_records" API extension`) - } - - // Send the request. - _, _, err := r.query("POST", fmt.Sprintf("/network-zones/%s/records", url.PathEscape(zone)), record, "") - if err != nil { - return err - } - - return nil -} - -// UpdateNetworkZoneRecord updates the network zone record to match the provided struct. -func (r *ProtocolLXD) UpdateNetworkZoneRecord(zone string, name string, record api.NetworkZoneRecordPut, ETag string) error { - if !r.HasExtension("network_dns_records") { - return fmt.Errorf(`The server is missing the required "network_dns_records" API extension`) - } - - // Send the request. - _, _, err := r.query("PUT", fmt.Sprintf("/network-zones/%s/records/%s", url.PathEscape(zone), url.PathEscape(name)), record, ETag) - if err != nil { - return err - } - - return nil -} - -// DeleteNetworkZoneRecord deletes an existing network zone record. -func (r *ProtocolLXD) DeleteNetworkZoneRecord(zone string, name string) error { - if !r.HasExtension("network_dns_records") { - return fmt.Errorf(`The server is missing the required "network_dns_records" API extension`) - } - - // Send the request. 
- _, _, err := r.query("DELETE", fmt.Sprintf("/network-zones/%s/records/%s", url.PathEscape(zone), url.PathEscape(name)), nil, "") - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_networks.go b/vendor/github.com/lxc/lxd/client/lxd_networks.go deleted file mode 100644 index c8200700..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_networks.go +++ /dev/null @@ -1,154 +0,0 @@ -package lxd - -import ( - "fmt" - "net/url" - - "github.com/lxc/lxd/shared/api" -) - -// GetNetworkNames returns a list of network names. -func (r *ProtocolLXD) GetNetworkNames() ([]string, error) { - if !r.HasExtension("network") { - return nil, fmt.Errorf("The server is missing the required \"network\" API extension") - } - - // Fetch the raw values. - urls := []string{} - baseURL := "/networks" - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetNetworks returns a list of Network struct. -func (r *ProtocolLXD) GetNetworks() ([]api.Network, error) { - if !r.HasExtension("network") { - return nil, fmt.Errorf("The server is missing the required \"network\" API extension") - } - - networks := []api.Network{} - - // Fetch the raw value - _, err := r.queryStruct("GET", "/networks?recursion=1", nil, "", &networks) - if err != nil { - return nil, err - } - - return networks, nil -} - -// GetNetwork returns a Network entry for the provided name. 
-func (r *ProtocolLXD) GetNetwork(name string) (*api.Network, string, error) { - if !r.HasExtension("network") { - return nil, "", fmt.Errorf("The server is missing the required \"network\" API extension") - } - - network := api.Network{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s", url.PathEscape(name)), nil, "", &network) - if err != nil { - return nil, "", err - } - - return &network, etag, nil -} - -// GetNetworkLeases returns a list of Network struct. -func (r *ProtocolLXD) GetNetworkLeases(name string) ([]api.NetworkLease, error) { - if !r.HasExtension("network_leases") { - return nil, fmt.Errorf("The server is missing the required \"network_leases\" API extension") - } - - leases := []api.NetworkLease{} - - // Fetch the raw value - _, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s/leases", url.PathEscape(name)), nil, "", &leases) - if err != nil { - return nil, err - } - - return leases, nil -} - -// GetNetworkState returns metrics and information on the running network. -func (r *ProtocolLXD) GetNetworkState(name string) (*api.NetworkState, error) { - if !r.HasExtension("network_state") { - return nil, fmt.Errorf("The server is missing the required \"network_state\" API extension") - } - - state := api.NetworkState{} - - // Fetch the raw value - _, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s/state", url.PathEscape(name)), nil, "", &state) - if err != nil { - return nil, err - } - - return &state, nil -} - -// CreateNetwork defines a new network using the provided Network struct. -func (r *ProtocolLXD) CreateNetwork(network api.NetworksPost) error { - if !r.HasExtension("network") { - return fmt.Errorf("The server is missing the required \"network\" API extension") - } - - // Send the request - _, _, err := r.query("POST", "/networks", network, "") - if err != nil { - return err - } - - return nil -} - -// UpdateNetwork updates the network to match the provided Network struct. 
-func (r *ProtocolLXD) UpdateNetwork(name string, network api.NetworkPut, ETag string) error { - if !r.HasExtension("network") { - return fmt.Errorf("The server is missing the required \"network\" API extension") - } - - // Send the request - _, _, err := r.query("PUT", fmt.Sprintf("/networks/%s", url.PathEscape(name)), network, ETag) - if err != nil { - return err - } - - return nil -} - -// RenameNetwork renames an existing network entry. -func (r *ProtocolLXD) RenameNetwork(name string, network api.NetworkPost) error { - if !r.HasExtension("network") { - return fmt.Errorf("The server is missing the required \"network\" API extension") - } - - // Send the request - _, _, err := r.query("POST", fmt.Sprintf("/networks/%s", url.PathEscape(name)), network, "") - if err != nil { - return err - } - - return nil -} - -// DeleteNetwork deletes an existing network. -func (r *ProtocolLXD) DeleteNetwork(name string) error { - if !r.HasExtension("network") { - return fmt.Errorf("The server is missing the required \"network\" API extension") - } - - // Send the request - _, _, err := r.query("DELETE", fmt.Sprintf("/networks/%s", url.PathEscape(name)), nil, "") - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_operations.go b/vendor/github.com/lxc/lxd/client/lxd_operations.go deleted file mode 100644 index 86287339..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_operations.go +++ /dev/null @@ -1,103 +0,0 @@ -package lxd - -import ( - "fmt" - "net/url" - - "github.com/gorilla/websocket" - - "github.com/lxc/lxd/shared/api" -) - -// GetOperationUUIDs returns a list of operation uuids. -func (r *ProtocolLXD) GetOperationUUIDs() ([]string, error) { - // Fetch the raw URL values. - urls := []string{} - baseURL := "/operations" - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) 
-} - -// GetOperations returns a list of Operation struct. -func (r *ProtocolLXD) GetOperations() ([]api.Operation, error) { - apiOperations := map[string][]api.Operation{} - - // Fetch the raw value - _, err := r.queryStruct("GET", "/operations?recursion=1", nil, "", &apiOperations) - if err != nil { - return nil, err - } - - // Turn it into just a list of operations - operations := []api.Operation{} - for _, v := range apiOperations { - operations = append(operations, v...) - } - - return operations, nil -} - -// GetOperation returns an Operation entry for the provided uuid. -func (r *ProtocolLXD) GetOperation(uuid string) (*api.Operation, string, error) { - op := api.Operation{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("/operations/%s", url.PathEscape(uuid)), nil, "", &op) - if err != nil { - return nil, "", err - } - - return &op, etag, nil -} - -// GetOperationWait returns an Operation entry for the provided uuid once it's complete or hits the timeout. -func (r *ProtocolLXD) GetOperationWait(uuid string, timeout int) (*api.Operation, string, error) { - op := api.Operation{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("/operations/%s/wait?timeout=%d", url.PathEscape(uuid), timeout), nil, "", &op) - if err != nil { - return nil, "", err - } - - return &op, etag, nil -} - -// GetOperationWaitSecret returns an Operation entry for the provided uuid and secret once it's complete or hits the timeout. -func (r *ProtocolLXD) GetOperationWaitSecret(uuid string, secret string, timeout int) (*api.Operation, string, error) { - op := api.Operation{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("/operations/%s/wait?secret=%s&timeout=%d", url.PathEscape(uuid), url.PathEscape(secret), timeout), nil, "", &op) - if err != nil { - return nil, "", err - } - - return &op, etag, nil -} - -// GetOperationWebsocket returns a websocket connection for the provided operation. 
-func (r *ProtocolLXD) GetOperationWebsocket(uuid string, secret string) (*websocket.Conn, error) { - path := fmt.Sprintf("/operations/%s/websocket", url.PathEscape(uuid)) - if secret != "" { - path = fmt.Sprintf("%s?secret=%s", path, url.QueryEscape(secret)) - } - - return r.websocket(path) -} - -// DeleteOperation deletes (cancels) a running operation. -func (r *ProtocolLXD) DeleteOperation(uuid string) error { - // Send the request - _, _, err := r.query("DELETE", fmt.Sprintf("/operations/%s", url.PathEscape(uuid)), nil, "") - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_profiles.go b/vendor/github.com/lxc/lxd/client/lxd_profiles.go deleted file mode 100644 index 215c3846..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_profiles.go +++ /dev/null @@ -1,94 +0,0 @@ -package lxd - -import ( - "fmt" - "net/url" - - "github.com/lxc/lxd/shared/api" -) - -// Profile handling functions - -// GetProfileNames returns a list of available profile names. -func (r *ProtocolLXD) GetProfileNames() ([]string, error) { - // Fetch the raw URL values. - urls := []string{} - baseURL := "/profiles" - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetProfiles returns a list of available Profile structs. -func (r *ProtocolLXD) GetProfiles() ([]api.Profile, error) { - profiles := []api.Profile{} - - // Fetch the raw value - _, err := r.queryStruct("GET", "/profiles?recursion=1", nil, "", &profiles) - if err != nil { - return nil, err - } - - return profiles, nil -} - -// GetProfile returns a Profile entry for the provided name. 
-func (r *ProtocolLXD) GetProfile(name string) (*api.Profile, string, error) { - profile := api.Profile{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("/profiles/%s", url.PathEscape(name)), nil, "", &profile) - if err != nil { - return nil, "", err - } - - return &profile, etag, nil -} - -// CreateProfile defines a new container profile. -func (r *ProtocolLXD) CreateProfile(profile api.ProfilesPost) error { - // Send the request - _, _, err := r.query("POST", "/profiles", profile, "") - if err != nil { - return err - } - - return nil -} - -// UpdateProfile updates the profile to match the provided Profile struct. -func (r *ProtocolLXD) UpdateProfile(name string, profile api.ProfilePut, ETag string) error { - // Send the request - _, _, err := r.query("PUT", fmt.Sprintf("/profiles/%s", url.PathEscape(name)), profile, ETag) - if err != nil { - return err - } - - return nil -} - -// RenameProfile renames an existing profile entry. -func (r *ProtocolLXD) RenameProfile(name string, profile api.ProfilePost) error { - // Send the request - _, _, err := r.query("POST", fmt.Sprintf("/profiles/%s", url.PathEscape(name)), profile, "") - if err != nil { - return err - } - - return nil -} - -// DeleteProfile deletes a profile. -func (r *ProtocolLXD) DeleteProfile(name string) error { - // Send the request - _, _, err := r.query("DELETE", fmt.Sprintf("/profiles/%s", url.PathEscape(name)), nil, "") - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_projects.go b/vendor/github.com/lxc/lxd/client/lxd_projects.go deleted file mode 100644 index f0502d30..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_projects.go +++ /dev/null @@ -1,139 +0,0 @@ -package lxd - -import ( - "fmt" - "net/url" - - "github.com/lxc/lxd/shared/api" -) - -// Project handling functions - -// GetProjectNames returns a list of available project names. 
-func (r *ProtocolLXD) GetProjectNames() ([]string, error) { - if !r.HasExtension("projects") { - return nil, fmt.Errorf("The server is missing the required \"projects\" API extension") - } - - // Fetch the raw URL values. - urls := []string{} - baseURL := "/projects" - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetProjects returns a list of available Project structs. -func (r *ProtocolLXD) GetProjects() ([]api.Project, error) { - if !r.HasExtension("projects") { - return nil, fmt.Errorf("The server is missing the required \"projects\" API extension") - } - - projects := []api.Project{} - - // Fetch the raw value - _, err := r.queryStruct("GET", "/projects?recursion=1", nil, "", &projects) - if err != nil { - return nil, err - } - - return projects, nil -} - -// GetProject returns a Project entry for the provided name. -func (r *ProtocolLXD) GetProject(name string) (*api.Project, string, error) { - if !r.HasExtension("projects") { - return nil, "", fmt.Errorf("The server is missing the required \"projects\" API extension") - } - - project := api.Project{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("/projects/%s", url.PathEscape(name)), nil, "", &project) - if err != nil { - return nil, "", err - } - - return &project, etag, nil -} - -// GetProjectState returns a Project state for the provided name. 
-func (r *ProtocolLXD) GetProjectState(name string) (*api.ProjectState, error) { - if !r.HasExtension("project_usage") { - return nil, fmt.Errorf("The server is missing the required \"project_usage\" API extension") - } - - projectState := api.ProjectState{} - - // Fetch the raw value - _, err := r.queryStruct("GET", fmt.Sprintf("/projects/%s/state", url.PathEscape(name)), nil, "", &projectState) - if err != nil { - return nil, err - } - - return &projectState, nil -} - -// CreateProject defines a new container project. -func (r *ProtocolLXD) CreateProject(project api.ProjectsPost) error { - if !r.HasExtension("projects") { - return fmt.Errorf("The server is missing the required \"projects\" API extension") - } - - // Send the request - _, _, err := r.query("POST", "/projects", project, "") - if err != nil { - return err - } - - return nil -} - -// UpdateProject updates the project to match the provided Project struct. -func (r *ProtocolLXD) UpdateProject(name string, project api.ProjectPut, ETag string) error { - if !r.HasExtension("projects") { - return fmt.Errorf("The server is missing the required \"projects\" API extension") - } - - // Send the request - _, _, err := r.query("PUT", fmt.Sprintf("/projects/%s", url.PathEscape(name)), project, ETag) - if err != nil { - return err - } - - return nil -} - -// RenameProject renames an existing project entry. -func (r *ProtocolLXD) RenameProject(name string, project api.ProjectPost) (Operation, error) { - if !r.HasExtension("projects") { - return nil, fmt.Errorf("The server is missing the required \"projects\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/projects/%s", url.PathEscape(name)), project, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// DeleteProject deletes a project. 
-func (r *ProtocolLXD) DeleteProject(name string) error { - if !r.HasExtension("projects") { - return fmt.Errorf("The server is missing the required \"projects\" API extension") - } - - // Send the request - _, _, err := r.query("DELETE", fmt.Sprintf("/projects/%s", url.PathEscape(name)), nil, "") - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_server.go b/vendor/github.com/lxc/lxd/client/lxd_server.go deleted file mode 100644 index a59abd09..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_server.go +++ /dev/null @@ -1,193 +0,0 @@ -package lxd - -import ( - "fmt" - "io" - "net/http" - - "github.com/gorilla/websocket" - - "github.com/lxc/lxd/shared" - "github.com/lxc/lxd/shared/api" -) - -// Server handling functions - -// GetServer returns the server status as a Server struct. -func (r *ProtocolLXD) GetServer() (*api.Server, string, error) { - server := api.Server{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", "", nil, "", &server) - if err != nil { - return nil, "", err - } - - // Fill in certificate fingerprint if not provided - if server.Environment.CertificateFingerprint == "" && server.Environment.Certificate != "" { - var err error - server.Environment.CertificateFingerprint, err = shared.CertFingerprintStr(server.Environment.Certificate) - if err != nil { - return nil, "", err - } - } - - if !server.Public && len(server.AuthMethods) == 0 { - // TLS is always available for LXD servers - server.AuthMethods = []string{"tls"} - } - - // Add the value to the cache - r.server = &server - - return &server, etag, nil -} - -// UpdateServer updates the server status to match the provided Server struct. -func (r *ProtocolLXD) UpdateServer(server api.ServerPut, ETag string) error { - // Send the request - _, _, err := r.query("PUT", "", server, ETag) - if err != nil { - return err - } - - return nil -} - -// HasExtension returns true if the server supports a given API extension. 
-func (r *ProtocolLXD) HasExtension(extension string) bool { - // If no cached API information, just assume we're good - // This is needed for those rare cases where we must avoid a GetServer call - if r.server == nil { - return true - } - - for _, entry := range r.server.APIExtensions { - if entry == extension { - return true - } - } - - return false -} - -// CheckExtension checks if the server has the specified extension. -func (r *ProtocolLXD) CheckExtension(extensionName string) error { - if !r.HasExtension(extensionName) { - return fmt.Errorf("The server is missing the required %q API extension", extensionName) - } - - return nil -} - -// IsClustered returns true if the server is part of a LXD cluster. -func (r *ProtocolLXD) IsClustered() bool { - return r.server.Environment.ServerClustered -} - -// GetServerResources returns the resources available to a given LXD server. -func (r *ProtocolLXD) GetServerResources() (*api.Resources, error) { - if !r.HasExtension("resources") { - return nil, fmt.Errorf("The server is missing the required \"resources\" API extension") - } - - resources := api.Resources{} - - // Fetch the raw value - _, err := r.queryStruct("GET", "/resources", nil, "", &resources) - if err != nil { - return nil, err - } - - return &resources, nil -} - -// UseProject returns a client that will use a specific project. -func (r *ProtocolLXD) UseProject(name string) InstanceServer { - return &ProtocolLXD{ - ctx: r.ctx, - ctxConnected: r.ctxConnected, - ctxConnectedCancel: r.ctxConnectedCancel, - server: r.server, - http: r.http, - httpCertificate: r.httpCertificate, - httpBaseURL: r.httpBaseURL, - httpProtocol: r.httpProtocol, - httpUserAgent: r.httpUserAgent, - bakeryClient: r.bakeryClient, - bakeryInteractor: r.bakeryInteractor, - requireAuthenticated: r.requireAuthenticated, - clusterTarget: r.clusterTarget, - project: name, - eventConns: make(map[string]*websocket.Conn), // New project specific listener conns. 
- eventListeners: make(map[string][]*EventListener), // New project specific listeners. - } -} - -// UseTarget returns a client that will target a specific cluster member. -// Use this member-specific operations such as specific container -// placement, preparing a new storage pool or network, ... -func (r *ProtocolLXD) UseTarget(name string) InstanceServer { - return &ProtocolLXD{ - ctx: r.ctx, - ctxConnected: r.ctxConnected, - ctxConnectedCancel: r.ctxConnectedCancel, - server: r.server, - http: r.http, - httpCertificate: r.httpCertificate, - httpBaseURL: r.httpBaseURL, - httpProtocol: r.httpProtocol, - httpUserAgent: r.httpUserAgent, - bakeryClient: r.bakeryClient, - bakeryInteractor: r.bakeryInteractor, - requireAuthenticated: r.requireAuthenticated, - project: r.project, - eventConns: make(map[string]*websocket.Conn), // New target specific listener conns. - eventListeners: make(map[string][]*EventListener), // New target specific listeners. - clusterTarget: name, - } -} - -// IsAgent returns true if the server is a LXD agent. -func (r *ProtocolLXD) IsAgent() bool { - return r.server != nil && r.server.Environment.Server == "lxd-agent" -} - -// GetMetrics returns the text OpenMetrics data. -func (r *ProtocolLXD) GetMetrics() (string, error) { - // Check that the server supports it. - if !r.HasExtension("metrics") { - return "", fmt.Errorf("The server is missing the required \"metrics\" API extension") - } - - // Prepare the request. - requestURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0/metrics", r.httpBaseURL.String())) - if err != nil { - return "", err - } - - req, err := http.NewRequest("GET", requestURL, nil) - if err != nil { - return "", err - } - - // Send the request. - resp, err := r.DoHTTP(req) - if err != nil { - return "", err - } - - defer func() { _ = resp.Body.Close() }() - - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("Bad HTTP status: %d", resp.StatusCode) - } - - // Get the content. 
- content, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - - return string(content), nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_storage_buckets.go b/vendor/github.com/lxc/lxd/client/lxd_storage_buckets.go deleted file mode 100644 index 5e1e3211..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_storage_buckets.go +++ /dev/null @@ -1,235 +0,0 @@ -package lxd - -import ( - "github.com/lxc/lxd/shared/api" -) - -// GetStoragePoolBucketNames returns a list of storage bucket names. -func (r *ProtocolLXD) GetStoragePoolBucketNames(poolName string) ([]string, error) { - err := r.CheckExtension("storage_buckets") - if err != nil { - return nil, err - } - - // Fetch the raw URL values. - urls := []string{} - u := api.NewURL().Path("storage-pools", poolName, "buckets") - _, err = r.queryStruct("GET", u.String(), nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(u.String(), urls...) -} - -// GetStoragePoolBuckets returns a list of storage buckets for the provided pool. -func (r *ProtocolLXD) GetStoragePoolBuckets(poolName string) ([]api.StorageBucket, error) { - err := r.CheckExtension("storage_buckets") - if err != nil { - return nil, err - } - - buckets := []api.StorageBucket{} - - // Fetch the raw value. - u := api.NewURL().Path("storage-pools", poolName, "buckets").WithQuery("recursion", "1") - _, err = r.queryStruct("GET", u.String(), nil, "", &buckets) - if err != nil { - return nil, err - } - - return buckets, nil -} - -// GetStoragePoolBucket returns a storage bucket entry for the provided pool and bucket name. -func (r *ProtocolLXD) GetStoragePoolBucket(poolName string, bucketName string) (*api.StorageBucket, string, error) { - err := r.CheckExtension("storage_buckets") - if err != nil { - return nil, "", err - } - - bucket := api.StorageBucket{} - - // Fetch the raw value. 
- u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName) - etag, err := r.queryStruct("GET", u.String(), nil, "", &bucket) - if err != nil { - return nil, "", err - } - - return &bucket, etag, nil -} - -// CreateStoragePoolBucket defines a new storage bucket using the provided struct. -// If the server supports storage_buckets_create_credentials API extension, then this function will return the -// initial admin credentials. Otherwise it will be nil. -func (r *ProtocolLXD) CreateStoragePoolBucket(poolName string, bucket api.StorageBucketsPost) (*api.StorageBucketKey, error) { - err := r.CheckExtension("storage_buckets") - if err != nil { - return nil, err - } - - u := api.NewURL().Path("storage-pools", poolName, "buckets") - - // Send the request and get the resulting key info (including generated keys). - if r.HasExtension("storage_buckets_create_credentials") { - var newKey api.StorageBucketKey - _, err = r.queryStruct("POST", u.String(), bucket, "", &newKey) - if err != nil { - return nil, err - } - - return &newKey, nil - } - - _, _, err = r.query("POST", u.String(), bucket, "") - if err != nil { - return nil, err - } - - return nil, nil -} - -// UpdateStoragePoolBucket updates the storage bucket to match the provided struct. -func (r *ProtocolLXD) UpdateStoragePoolBucket(poolName string, bucketName string, bucket api.StorageBucketPut, ETag string) error { - err := r.CheckExtension("storage_buckets") - if err != nil { - return err - } - - // Send the request. - u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName) - _, _, err = r.query("PUT", u.String(), bucket, ETag) - if err != nil { - return err - } - - return nil -} - -// DeleteStoragePoolBucket deletes an existing storage bucket. -func (r *ProtocolLXD) DeleteStoragePoolBucket(poolName string, bucketName string) error { - err := r.CheckExtension("storage_buckets") - if err != nil { - return err - } - - // Send the request. 
- u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName) - _, _, err = r.query("DELETE", u.String(), nil, "") - if err != nil { - return err - } - - return nil -} - -// GetStoragePoolBucketKeyNames returns a list of storage bucket key names. -func (r *ProtocolLXD) GetStoragePoolBucketKeyNames(poolName string, bucketName string) ([]string, error) { - err := r.CheckExtension("storage_buckets") - if err != nil { - return nil, err - } - - // Fetch the raw URL values. - urls := []string{} - u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName, "keys") - _, err = r.queryStruct("GET", u.String(), nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(u.String(), urls...) -} - -// GetStoragePoolBucketKeys returns a list of storage bucket keys for the provided pool and bucket. -func (r *ProtocolLXD) GetStoragePoolBucketKeys(poolName string, bucketName string) ([]api.StorageBucketKey, error) { - err := r.CheckExtension("storage_buckets") - if err != nil { - return nil, err - } - - bucketKeys := []api.StorageBucketKey{} - - // Fetch the raw value. - u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName, "keys").WithQuery("recursion", "1") - _, err = r.queryStruct("GET", u.String(), nil, "", &bucketKeys) - if err != nil { - return nil, err - } - - return bucketKeys, nil -} - -// GetStoragePoolBucketKey returns a storage bucket key entry for the provided pool, bucket and key name. -func (r *ProtocolLXD) GetStoragePoolBucketKey(poolName string, bucketName string, keyName string) (*api.StorageBucketKey, string, error) { - err := r.CheckExtension("storage_buckets") - if err != nil { - return nil, "", err - } - - bucketKey := api.StorageBucketKey{} - - // Fetch the raw value. 
- u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName, "keys", keyName) - etag, err := r.queryStruct("GET", u.String(), nil, "", &bucketKey) - if err != nil { - return nil, "", err - } - - return &bucketKey, etag, nil -} - -// CreateStoragePoolBucketKey adds a key to a storage bucket. -func (r *ProtocolLXD) CreateStoragePoolBucketKey(poolName string, bucketName string, key api.StorageBucketKeysPost) (*api.StorageBucketKey, error) { - err := r.CheckExtension("storage_buckets") - if err != nil { - return nil, err - } - - // Send the request and get the resulting key info (including generated keys). - var newKey api.StorageBucketKey - u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName, "keys") - _, err = r.queryStruct("POST", u.String(), key, "", &newKey) - if err != nil { - return nil, err - } - - return &newKey, err -} - -// UpdateStoragePoolBucketKey updates an existing storage bucket key. -func (r *ProtocolLXD) UpdateStoragePoolBucketKey(poolName string, bucketName string, keyName string, key api.StorageBucketKeyPut, ETag string) error { - err := r.CheckExtension("storage_buckets") - if err != nil { - return err - } - - // Send the request. - u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName, "keys", keyName) - _, _, err = r.query("PUT", u.String(), key, ETag) - if err != nil { - return err - } - - return nil -} - -// DeleteStoragePoolBucketKey removes a key from a storage bucket. -func (r *ProtocolLXD) DeleteStoragePoolBucketKey(poolName string, bucketName string, keyName string) error { - err := r.CheckExtension("storage_buckets") - if err != nil { - return err - } - - // Send the request. 
- u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName, "keys", keyName) - _, _, err = r.query("DELETE", u.String(), nil, "") - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_storage_pools.go b/vendor/github.com/lxc/lxd/client/lxd_storage_pools.go deleted file mode 100644 index 0b9297ab..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_storage_pools.go +++ /dev/null @@ -1,128 +0,0 @@ -package lxd - -import ( - "fmt" - "net/url" - - "github.com/lxc/lxd/shared/api" -) - -// Storage pool handling functions - -// GetStoragePoolNames returns the names of all storage pools. -func (r *ProtocolLXD) GetStoragePoolNames() ([]string, error) { - if !r.HasExtension("storage") { - return nil, fmt.Errorf("The server is missing the required \"storage\" API extension") - } - - // Fetch the raw URL values. - urls := []string{} - baseURL := "/storage-pools" - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetStoragePools returns a list of StoragePool entries. -func (r *ProtocolLXD) GetStoragePools() ([]api.StoragePool, error) { - if !r.HasExtension("storage") { - return nil, fmt.Errorf("The server is missing the required \"storage\" API extension") - } - - pools := []api.StoragePool{} - - // Fetch the raw value - _, err := r.queryStruct("GET", "/storage-pools?recursion=1", nil, "", &pools) - if err != nil { - return nil, err - } - - return pools, nil -} - -// GetStoragePool returns a StoragePool entry for the provided pool name. 
-func (r *ProtocolLXD) GetStoragePool(name string) (*api.StoragePool, string, error) { - if !r.HasExtension("storage") { - return nil, "", fmt.Errorf("The server is missing the required \"storage\" API extension") - } - - pool := api.StoragePool{} - - // Fetch the raw value - etag, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s", url.PathEscape(name)), nil, "", &pool) - if err != nil { - return nil, "", err - } - - return &pool, etag, nil -} - -// CreateStoragePool defines a new storage pool using the provided StoragePool struct. -func (r *ProtocolLXD) CreateStoragePool(pool api.StoragePoolsPost) error { - if !r.HasExtension("storage") { - return fmt.Errorf("The server is missing the required \"storage\" API extension") - } - - if pool.Driver == "ceph" && !r.HasExtension("storage_driver_ceph") { - return fmt.Errorf("The server is missing the required \"storage_driver_ceph\" API extension") - } - - // Send the request - _, _, err := r.query("POST", "/storage-pools", pool, "") - if err != nil { - return err - } - - return nil -} - -// UpdateStoragePool updates the pool to match the provided StoragePool struct. -func (r *ProtocolLXD) UpdateStoragePool(name string, pool api.StoragePoolPut, ETag string) error { - if !r.HasExtension("storage") { - return fmt.Errorf("The server is missing the required \"storage\" API extension") - } - - // Send the request - _, _, err := r.query("PUT", fmt.Sprintf("/storage-pools/%s", url.PathEscape(name)), pool, ETag) - if err != nil { - return err - } - - return nil -} - -// DeleteStoragePool deletes a storage pool. 
-func (r *ProtocolLXD) DeleteStoragePool(name string) error { - if !r.HasExtension("storage") { - return fmt.Errorf("The server is missing the required \"storage\" API extension") - } - - // Send the request - _, _, err := r.query("DELETE", fmt.Sprintf("/storage-pools/%s", url.PathEscape(name)), nil, "") - if err != nil { - return err - } - - return nil -} - -// GetStoragePoolResources gets the resources available to a given storage pool. -func (r *ProtocolLXD) GetStoragePoolResources(name string) (*api.ResourcesStoragePool, error) { - if !r.HasExtension("resources") { - return nil, fmt.Errorf("The server is missing the required \"resources\" API extension") - } - - res := api.ResourcesStoragePool{} - - // Fetch the raw value - _, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s/resources", url.PathEscape(name)), nil, "", &res) - if err != nil { - return nil, err - } - - return &res, nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_storage_volumes.go b/vendor/github.com/lxc/lxd/client/lxd_storage_volumes.go deleted file mode 100644 index ec67890f..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_storage_volumes.go +++ /dev/null @@ -1,983 +0,0 @@ -package lxd - -import ( - "fmt" - "io" - "net/http" - "net/url" - - "github.com/lxc/lxd/shared" - "github.com/lxc/lxd/shared/api" - "github.com/lxc/lxd/shared/cancel" - "github.com/lxc/lxd/shared/ioprogress" - "github.com/lxc/lxd/shared/units" -) - -// Storage volumes handling function - -// GetStoragePoolVolumeNames returns the names of all volumes in a pool. -func (r *ProtocolLXD) GetStoragePoolVolumeNames(pool string) ([]string, error) { - if !r.HasExtension("storage") { - return nil, fmt.Errorf("The server is missing the required \"storage\" API extension") - } - - // Fetch the raw URL values. 
- urls := []string{} - baseURL := fmt.Sprintf("/storage-pools/%s/volumes", url.PathEscape(pool)) - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetStoragePoolVolumeNamesAllProjects returns the names of all volumes in a pool for all projects. -func (r *ProtocolLXD) GetStoragePoolVolumeNamesAllProjects(pool string) ([]string, error) { - err := r.CheckExtension("storage") - if err != nil { - return nil, err - } - - err = r.CheckExtension("storage_volumes_all_projects") - if err != nil { - return nil, err - } - - // Fetch the raw URL values. - urls := []string{} - u := api.NewURL().Path("storage-pools", pool, "volumes").WithQuery("all-projects", "true") - _, err = r.queryStruct("GET", u.String(), nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(u.String(), urls...) -} - -// GetStoragePoolVolumes returns a list of StorageVolume entries for the provided pool. -func (r *ProtocolLXD) GetStoragePoolVolumes(pool string) ([]api.StorageVolume, error) { - if !r.HasExtension("storage") { - return nil, fmt.Errorf("The server is missing the required \"storage\" API extension") - } - - volumes := []api.StorageVolume{} - - // Fetch the raw value - _, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s/volumes?recursion=1", url.PathEscape(pool)), nil, "", &volumes) - if err != nil { - return nil, err - } - - return volumes, nil -} - -// GetStoragePoolVolumesAllProjects returns a list of StorageVolume entries for the provided pool for all projects. 
-func (r *ProtocolLXD) GetStoragePoolVolumesAllProjects(pool string) ([]api.StorageVolume, error) { - err := r.CheckExtension("storage") - if err != nil { - return nil, err - } - - err = r.CheckExtension("storage_volumes_all_projects") - if err != nil { - return nil, err - } - - volumes := []api.StorageVolume{} - - url := api.NewURL().Path("storage-pools", pool, "volumes"). - WithQuery("recursion", "1"). - WithQuery("all-projects", "true") - - // Fetch the raw value. - _, err = r.queryStruct("GET", url.String(), nil, "", &volumes) - if err != nil { - return nil, err - } - - return volumes, nil -} - -// GetStoragePoolVolumesWithFilter returns a filtered list of StorageVolume entries for the provided pool. -func (r *ProtocolLXD) GetStoragePoolVolumesWithFilter(pool string, filters []string) ([]api.StorageVolume, error) { - if !r.HasExtension("storage") { - return nil, fmt.Errorf("The server is missing the required \"storage\" API extension") - } - - volumes := []api.StorageVolume{} - - v := url.Values{} - v.Set("recursion", "1") - v.Set("filter", parseFilters(filters)) - // Fetch the raw value - _, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s/volumes?%s", url.PathEscape(pool), v.Encode()), nil, "", &volumes) - if err != nil { - return nil, err - } - - return volumes, nil -} - -// GetStoragePoolVolumesWithFilterAllProjects returns a filtered list of StorageVolume entries for the provided pool for all projects. -func (r *ProtocolLXD) GetStoragePoolVolumesWithFilterAllProjects(pool string, filters []string) ([]api.StorageVolume, error) { - err := r.CheckExtension("storage") - if err != nil { - return nil, err - } - - err = r.CheckExtension("storage_volumes_all_projects") - if err != nil { - return nil, err - } - - volumes := []api.StorageVolume{} - - url := api.NewURL().Path("storage-pools", pool, "volumes"). - WithQuery("recursion", "1"). - WithQuery("filter", parseFilters(filters)). - WithQuery("all-projects", "true") - - // Fetch the raw value. 
- _, err = r.queryStruct("GET", url.String(), nil, "", &volumes) - if err != nil { - return nil, err - } - - return volumes, nil -} - -// GetStoragePoolVolume returns a StorageVolume entry for the provided pool and volume name. -func (r *ProtocolLXD) GetStoragePoolVolume(pool string, volType string, name string) (*api.StorageVolume, string, error) { - if !r.HasExtension("storage") { - return nil, "", fmt.Errorf("The server is missing the required \"storage\" API extension") - } - - volume := api.StorageVolume{} - - // Fetch the raw value - path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.PathEscape(pool), url.PathEscape(volType), url.PathEscape(name)) - etag, err := r.queryStruct("GET", path, nil, "", &volume) - if err != nil { - return nil, "", err - } - - return &volume, etag, nil -} - -// GetStoragePoolVolumeState returns a StorageVolumeState entry for the provided pool and volume name. -func (r *ProtocolLXD) GetStoragePoolVolumeState(pool string, volType string, name string) (*api.StorageVolumeState, error) { - if !r.HasExtension("storage_volume_state") { - return nil, fmt.Errorf("The server is missing the required \"storage_volume_state\" API extension") - } - - // Fetch the raw value - state := api.StorageVolumeState{} - path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/state", url.PathEscape(pool), url.PathEscape(volType), url.PathEscape(name)) - _, err := r.queryStruct("GET", path, nil, "", &state) - if err != nil { - return nil, err - } - - return &state, nil -} - -// CreateStoragePoolVolume defines a new storage volume. 
-func (r *ProtocolLXD) CreateStoragePoolVolume(pool string, volume api.StorageVolumesPost) error { - if !r.HasExtension("storage") { - return fmt.Errorf("The server is missing the required \"storage\" API extension") - } - - // Send the request - path := fmt.Sprintf("/storage-pools/%s/volumes/%s", url.PathEscape(pool), url.PathEscape(volume.Type)) - _, _, err := r.query("POST", path, volume, "") - if err != nil { - return err - } - - return nil -} - -// CreateStoragePoolVolumeSnapshot defines a new storage volume. -func (r *ProtocolLXD) CreateStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshot api.StorageVolumeSnapshotsPost) (Operation, error) { - if !r.HasExtension("storage_api_volume_snapshots") { - return nil, fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") - } - - // Send the request - path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/snapshots", - url.PathEscape(pool), - url.PathEscape(volumeType), - url.PathEscape(volumeName)) - op, _, err := r.queryOperation("POST", path, snapshot, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// GetStoragePoolVolumeSnapshotNames returns a list of snapshot names for the -// storage volume. -func (r *ProtocolLXD) GetStoragePoolVolumeSnapshotNames(pool string, volumeType string, volumeName string) ([]string, error) { - if !r.HasExtension("storage_api_volume_snapshots") { - return nil, fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") - } - - // Fetch the raw URL values. - urls := []string{} - baseURL := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/snapshots", url.PathEscape(pool), url.PathEscape(volumeType), url.PathEscape(volumeName)) - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) 
-} - -// GetStoragePoolVolumeSnapshots returns a list of snapshots for the storage -// volume. -func (r *ProtocolLXD) GetStoragePoolVolumeSnapshots(pool string, volumeType string, volumeName string) ([]api.StorageVolumeSnapshot, error) { - if !r.HasExtension("storage_api_volume_snapshots") { - return nil, fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") - } - - snapshots := []api.StorageVolumeSnapshot{} - - path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/snapshots?recursion=1", - url.PathEscape(pool), - url.PathEscape(volumeType), - url.PathEscape(volumeName)) - _, err := r.queryStruct("GET", path, nil, "", &snapshots) - if err != nil { - return nil, err - } - - return snapshots, nil -} - -// GetStoragePoolVolumeSnapshot returns a snapshots for the storage volume. -func (r *ProtocolLXD) GetStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string) (*api.StorageVolumeSnapshot, string, error) { - if !r.HasExtension("storage_api_volume_snapshots") { - return nil, "", fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") - } - - snapshot := api.StorageVolumeSnapshot{} - - path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/snapshots/%s", - url.PathEscape(pool), - url.PathEscape(volumeType), - url.PathEscape(volumeName), - url.PathEscape(snapshotName)) - etag, err := r.queryStruct("GET", path, nil, "", &snapshot) - if err != nil { - return nil, "", err - } - - return &snapshot, etag, nil -} - -// RenameStoragePoolVolumeSnapshot renames a storage volume snapshot. 
-func (r *ProtocolLXD) RenameStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string, snapshot api.StorageVolumeSnapshotPost) (Operation, error) { - if !r.HasExtension("storage_api_volume_snapshots") { - return nil, fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") - } - - path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/snapshots/%s", url.PathEscape(pool), url.PathEscape(volumeType), url.PathEscape(volumeName), url.PathEscape(snapshotName)) - // Send the request - op, _, err := r.queryOperation("POST", path, snapshot, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// DeleteStoragePoolVolumeSnapshot deletes a storage volume snapshot. -func (r *ProtocolLXD) DeleteStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string) (Operation, error) { - if !r.HasExtension("storage_api_volume_snapshots") { - return nil, fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") - } - - // Send the request - path := fmt.Sprintf( - "/storage-pools/%s/volumes/%s/%s/snapshots/%s", - url.PathEscape(pool), url.PathEscape(volumeType), url.PathEscape(volumeName), url.PathEscape(snapshotName)) - - op, _, err := r.queryOperation("DELETE", path, nil, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// UpdateStoragePoolVolumeSnapshot updates the volume to match the provided StoragePoolVolume struct. 
-func (r *ProtocolLXD) UpdateStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string, volume api.StorageVolumeSnapshotPut, ETag string) error { - if !r.HasExtension("storage_api_volume_snapshots") { - return fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") - } - - // Send the request - path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/snapshots/%s", url.PathEscape(pool), url.PathEscape(volumeType), url.PathEscape(volumeName), url.PathEscape(snapshotName)) - _, _, err := r.queryOperation("PUT", path, volume, ETag) - if err != nil { - return err - } - - return nil -} - -// MigrateStoragePoolVolume requests that LXD prepares for a storage volume migration. -func (r *ProtocolLXD) MigrateStoragePoolVolume(pool string, volume api.StorageVolumePost) (Operation, error) { - if !r.HasExtension("storage_api_remote_volume_handling") { - return nil, fmt.Errorf("The server is missing the required \"storage_api_remote_volume_handling\" API extension") - } - - // Quick check. 
- if !volume.Migration { - return nil, fmt.Errorf("Can't ask for a rename through MigrateStoragePoolVolume") - } - - // Send the request - path := fmt.Sprintf("/storage-pools/%s/volumes/custom/%s", url.PathEscape(pool), volume.Name) - op, _, err := r.queryOperation("POST", path, volume, "") - if err != nil { - return nil, err - } - - return op, nil -} - -func (r *ProtocolLXD) tryMigrateStoragePoolVolume(source InstanceServer, pool string, req api.StorageVolumePost, urls []string) (RemoteOperation, error) { - if len(urls) == 0 { - return nil, fmt.Errorf("The source server isn't listening on the network") - } - - rop := remoteOperation{ - chDone: make(chan bool), - } - - operation := req.Target.Operation - - // Forward targetOp to remote op - go func() { - success := false - var errors []remoteOperationResult - for _, serverURL := range urls { - req.Target.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) - - // Send the request - top, err := source.MigrateStoragePoolVolume(pool, req) - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - continue - } - - rop := remoteOperation{ - targetOp: top, - chDone: make(chan bool), - } - - for _, handler := range rop.handlers { - _, _ = rop.targetOp.AddHandler(handler) - } - - err = rop.targetOp.Wait() - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - - if shared.IsConnectionError(err) { - continue - } - - break - } - - success = true - break - } - - if !success { - rop.err = remoteOperationError("Failed storage volume creation", errors) - } - - close(rop.chDone) - }() - - return &rop, nil -} - -func (r *ProtocolLXD) tryCreateStoragePoolVolume(pool string, req api.StorageVolumesPost, urls []string) (RemoteOperation, error) { - if len(urls) == 0 { - return nil, fmt.Errorf("The source server isn't listening on the network") - } - - rop := remoteOperation{ - chDone: make(chan bool), - } - - operation := 
req.Source.Operation - - // Forward targetOp to remote op - go func() { - success := false - var errors []remoteOperationResult - for _, serverURL := range urls { - req.Source.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) - - // Send the request - path := fmt.Sprintf("/storage-pools/%s/volumes/%s", url.PathEscape(pool), url.PathEscape(req.Type)) - top, _, err := r.queryOperation("POST", path, req, "") - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - continue - } - - rop := remoteOperation{ - targetOp: top, - chDone: make(chan bool), - } - - for _, handler := range rop.handlers { - _, _ = rop.targetOp.AddHandler(handler) - } - - err = rop.targetOp.Wait() - if err != nil { - errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) - - if shared.IsConnectionError(err) { - continue - } - - break - } - - success = true - break - } - - if !success { - rop.err = remoteOperationError("Failed storage volume creation", errors) - } - - close(rop.chDone) - }() - - return &rop, nil -} - -// CopyStoragePoolVolume copies an existing storage volume. 
-func (r *ProtocolLXD) CopyStoragePoolVolume(pool string, source InstanceServer, sourcePool string, volume api.StorageVolume, args *StoragePoolVolumeCopyArgs) (RemoteOperation, error) { - if !r.HasExtension("storage_api_local_volume_handling") { - return nil, fmt.Errorf("The server is missing the required \"storage_api_local_volume_handling\" API extension") - } - - if args != nil && args.VolumeOnly && !r.HasExtension("storage_api_volume_snapshots") { - return nil, fmt.Errorf("The target server is missing the required \"storage_api_volume_snapshots\" API extension") - } - - if args != nil && args.Refresh && !r.HasExtension("custom_volume_refresh") { - return nil, fmt.Errorf("The target server is missing the required \"custom_volume_refresh\" API extension") - } - - req := api.StorageVolumesPost{ - Name: args.Name, - Type: volume.Type, - Source: api.StorageVolumeSource{ - Name: volume.Name, - Type: "copy", - Pool: sourcePool, - VolumeOnly: args.VolumeOnly, - Refresh: args.Refresh, - }, - } - - req.Config = volume.Config - req.Description = volume.Description - req.ContentType = volume.ContentType - - sourceInfo, err := source.GetConnectionInfo() - if err != nil { - return nil, fmt.Errorf("Failed to get source connection info: %w", err) - } - - destInfo, err := r.GetConnectionInfo() - if err != nil { - return nil, fmt.Errorf("Failed to get destination connection info: %w", err) - } - - if destInfo.URL == sourceInfo.URL && destInfo.SocketPath == sourceInfo.SocketPath && (volume.Location == r.clusterTarget || (volume.Location == "none" && r.clusterTarget == "")) { - // Project handling - if destInfo.Project != sourceInfo.Project { - if !r.HasExtension("storage_api_project") { - return nil, fmt.Errorf("The server is missing the required \"storage_api_project\" API extension") - } - - req.Source.Project = sourceInfo.Project - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/storage-pools/%s/volumes/%s", url.PathEscape(pool), 
url.PathEscape(volume.Type)), req, "") - if err != nil { - return nil, err - } - - rop := remoteOperation{ - targetOp: op, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - rop.err = rop.targetOp.Wait() - close(rop.chDone) - }() - - return &rop, nil - } - - if !r.HasExtension("storage_api_remote_volume_handling") { - return nil, fmt.Errorf("The server is missing the required \"storage_api_remote_volume_handling\" API extension") - } - - sourceReq := api.StorageVolumePost{ - Migration: true, - Name: volume.Name, - Pool: sourcePool, - } - - if args != nil { - sourceReq.VolumeOnly = args.VolumeOnly - } - - // Push mode migration - if args != nil && args.Mode == "push" { - // Get target server connection information - info, err := r.GetConnectionInfo() - if err != nil { - return nil, err - } - - // Create the container - req.Source.Type = "migration" - req.Source.Mode = "push" - - // Send the request - path := fmt.Sprintf("/storage-pools/%s/volumes/%s", url.PathEscape(pool), url.PathEscape(volume.Type)) - - // Send the request - op, _, err := r.queryOperation("POST", path, req, "") - if err != nil { - return nil, err - } - - opAPI := op.Get() - - targetSecrets := map[string]string{} - for k, v := range opAPI.Metadata { - targetSecrets[k] = v.(string) - } - - // Prepare the source request - target := api.StorageVolumePostTarget{} - target.Operation = opAPI.ID - target.Websockets = targetSecrets - target.Certificate = info.Certificate - sourceReq.Target = &target - - return r.tryMigrateStoragePoolVolume(source, sourcePool, sourceReq, info.Addresses) - } - - // Get source server connection information - info, err := source.GetConnectionInfo() - if err != nil { - return nil, err - } - - // Get secrets from source server - op, err := source.MigrateStoragePoolVolume(sourcePool, sourceReq) - if err != nil { - return nil, err - } - - opAPI := op.Get() - - // Prepare source server secrets for remote - sourceSecrets := map[string]string{} - for 
k, v := range opAPI.Metadata { - sourceSecrets[k] = v.(string) - } - - // Relay mode migration - if args != nil && args.Mode == "relay" { - // Push copy source fields - req.Source.Type = "migration" - req.Source.Mode = "push" - - // Send the request - path := fmt.Sprintf("/storage-pools/%s/volumes/%s", url.PathEscape(pool), url.PathEscape(volume.Type)) - - // Send the request - targetOp, _, err := r.queryOperation("POST", path, req, "") - if err != nil { - return nil, err - } - - targetOpAPI := targetOp.Get() - - // Extract the websockets - targetSecrets := map[string]string{} - for k, v := range targetOpAPI.Metadata { - targetSecrets[k] = v.(string) - } - - // Launch the relay - err = r.proxyMigration(targetOp.(*operation), targetSecrets, source, op.(*operation), sourceSecrets) - if err != nil { - return nil, err - } - - // Prepare a tracking operation - rop := remoteOperation{ - targetOp: targetOp, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - rop.err = rop.targetOp.Wait() - close(rop.chDone) - }() - - return &rop, nil - } - - // Pull mode migration - req.Source.Type = "migration" - req.Source.Mode = "pull" - req.Source.Operation = opAPI.ID - req.Source.Websockets = sourceSecrets - req.Source.Certificate = info.Certificate - - return r.tryCreateStoragePoolVolume(pool, req, info.Addresses) -} - -// MoveStoragePoolVolume renames or moves an existing storage volume. 
-func (r *ProtocolLXD) MoveStoragePoolVolume(pool string, source InstanceServer, sourcePool string, volume api.StorageVolume, args *StoragePoolVolumeMoveArgs) (RemoteOperation, error) { - if !r.HasExtension("storage_api_local_volume_handling") { - return nil, fmt.Errorf("The server is missing the required \"storage_api_local_volume_handling\" API extension") - } - - if r != source { - return nil, fmt.Errorf("Moving storage volumes between remotes is not implemented") - } - - req := api.StorageVolumePost{ - Name: args.Name, - Pool: pool, - } - - if args.Project != "" { - if !r.HasExtension("storage_volume_project_move") { - return nil, fmt.Errorf("The server is missing the required \"storage_volume_project_move\" API extension") - } - - req.Project = args.Project - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.PathEscape(sourcePool), url.PathEscape(volume.Type), volume.Name), req, "") - if err != nil { - return nil, err - } - - rop := remoteOperation{ - targetOp: op, - chDone: make(chan bool), - } - - // Forward targetOp to remote op - go func() { - rop.err = rop.targetOp.Wait() - close(rop.chDone) - }() - - return &rop, nil -} - -// UpdateStoragePoolVolume updates the volume to match the provided StoragePoolVolume struct. 
-func (r *ProtocolLXD) UpdateStoragePoolVolume(pool string, volType string, name string, volume api.StorageVolumePut, ETag string) error { - if !r.HasExtension("storage") { - return fmt.Errorf("The server is missing the required \"storage\" API extension") - } - - if volume.Restore != "" && !r.HasExtension("storage_api_volume_snapshots") { - return fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") - } - - // Send the request - path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.PathEscape(pool), url.PathEscape(volType), url.PathEscape(name)) - _, _, err := r.query("PUT", path, volume, ETag) - if err != nil { - return err - } - - return nil -} - -// DeleteStoragePoolVolume deletes a storage pool. -func (r *ProtocolLXD) DeleteStoragePoolVolume(pool string, volType string, name string) error { - if !r.HasExtension("storage") { - return fmt.Errorf("The server is missing the required \"storage\" API extension") - } - - // Send the request - path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.PathEscape(pool), url.PathEscape(volType), url.PathEscape(name)) - _, _, err := r.query("DELETE", path, nil, "") - if err != nil { - return err - } - - return nil -} - -// RenameStoragePoolVolume renames a storage volume. -func (r *ProtocolLXD) RenameStoragePoolVolume(pool string, volType string, name string, volume api.StorageVolumePost) error { - if !r.HasExtension("storage_api_volume_rename") { - return fmt.Errorf("The server is missing the required \"storage_api_volume_rename\" API extension") - } - - path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.PathEscape(pool), url.PathEscape(volType), url.PathEscape(name)) - - // Send the request - _, _, err := r.query("POST", path, volume, "") - if err != nil { - return err - } - - return nil -} - -// GetStoragePoolVolumeBackupNames returns a list of volume backup names. 
-func (r *ProtocolLXD) GetStoragePoolVolumeBackupNames(pool string, volName string) ([]string, error) { - if !r.HasExtension("custom_volume_backup") { - return nil, fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") - } - - // Fetch the raw URL values. - urls := []string{} - baseURL := fmt.Sprintf("/storage-pools/%s/volumes/custom/%s/backups", url.PathEscape(pool), url.PathEscape(volName)) - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetStoragePoolVolumeBackups returns a list of custom volume backups. -func (r *ProtocolLXD) GetStoragePoolVolumeBackups(pool string, volName string) ([]api.StoragePoolVolumeBackup, error) { - if !r.HasExtension("custom_volume_backup") { - return nil, fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") - } - - // Fetch the raw value - backups := []api.StoragePoolVolumeBackup{} - - _, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s/volumes/custom/%s/backups?recursion=1", url.PathEscape(pool), url.PathEscape(volName)), nil, "", &backups) - if err != nil { - return nil, err - } - - return backups, nil -} - -// GetStoragePoolVolumeBackup returns a custom volume backup. 
-func (r *ProtocolLXD) GetStoragePoolVolumeBackup(pool string, volName string, name string) (*api.StoragePoolVolumeBackup, string, error) { - if !r.HasExtension("custom_volume_backup") { - return nil, "", fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") - } - - // Fetch the raw value - backup := api.StoragePoolVolumeBackup{} - etag, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s/volumes/custom/%s/backups/%s", url.PathEscape(pool), url.PathEscape(volName), url.PathEscape(name)), nil, "", &backup) - if err != nil { - return nil, "", err - } - - return &backup, etag, nil -} - -// CreateStoragePoolVolumeBackup creates new custom volume backup. -func (r *ProtocolLXD) CreateStoragePoolVolumeBackup(pool string, volName string, backup api.StoragePoolVolumeBackupsPost) (Operation, error) { - if !r.HasExtension("custom_volume_backup") { - return nil, fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/storage-pools/%s/volumes/custom/%s/backups", url.PathEscape(pool), url.PathEscape(volName)), backup, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// RenameStoragePoolVolumeBackup renames a custom volume backup. -func (r *ProtocolLXD) RenameStoragePoolVolumeBackup(pool string, volName string, name string, backup api.StoragePoolVolumeBackupPost) (Operation, error) { - if !r.HasExtension("custom_volume_backup") { - return nil, fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("POST", fmt.Sprintf("/storage-pools/%s/volumes/custom/%s/backups/%s", url.PathEscape(pool), url.PathEscape(volName), url.PathEscape(name)), backup, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// DeleteStoragePoolVolumeBackup deletes a custom volume backup. 
-func (r *ProtocolLXD) DeleteStoragePoolVolumeBackup(pool string, volName string, name string) (Operation, error) { - if !r.HasExtension("custom_volume_backup") { - return nil, fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") - } - - // Send the request - op, _, err := r.queryOperation("DELETE", fmt.Sprintf("/storage-pools/%s/volumes/custom/%s/backups/%s", url.PathEscape(pool), url.PathEscape(volName), url.PathEscape(name)), nil, "") - if err != nil { - return nil, err - } - - return op, nil -} - -// GetStoragePoolVolumeBackupFile requests the custom volume backup content. -func (r *ProtocolLXD) GetStoragePoolVolumeBackupFile(pool string, volName string, name string, req *BackupFileRequest) (*BackupFileResponse, error) { - if !r.HasExtension("custom_volume_backup") { - return nil, fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") - } - - // Build the URL - uri := fmt.Sprintf("%s/1.0/storage-pools/%s/volumes/custom/%s/backups/%s/export", r.httpBaseURL.String(), url.PathEscape(pool), url.PathEscape(volName), url.PathEscape(name)) - - if r.project != "" { - uri += fmt.Sprintf("?project=%s", url.QueryEscape(r.project)) - } - - // Prepare the download request - request, err := http.NewRequest("GET", uri, nil) - if err != nil { - return nil, err - } - - if r.httpUserAgent != "" { - request.Header.Set("User-Agent", r.httpUserAgent) - } - - // Start the request - response, doneCh, err := cancel.CancelableDownload(req.Canceler, r.http, request) - if err != nil { - return nil, err - } - - defer func() { _ = response.Body.Close() }() - defer close(doneCh) - - if response.StatusCode != http.StatusOK { - _, _, err := lxdParseResponse(response) - if err != nil { - return nil, err - } - } - - // Handle the data - body := response.Body - if req.ProgressHandler != nil { - body = &ioprogress.ProgressReader{ - ReadCloser: response.Body, - Tracker: &ioprogress.ProgressTracker{ - Length: 
response.ContentLength, - Handler: func(percent int64, speed int64) { - req.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("%d%% (%s/s)", percent, units.GetByteSizeString(speed, 2))}) - }, - }, - } - } - - size, err := io.Copy(req.BackupFile, body) - if err != nil { - return nil, err - } - - resp := BackupFileResponse{} - resp.Size = size - - return &resp, nil -} - -// CreateStoragePoolVolumeFromBackup creates a custom volume from a backup file. -func (r *ProtocolLXD) CreateStoragePoolVolumeFromBackup(pool string, args StoragePoolVolumeBackupArgs) (Operation, error) { - if !r.HasExtension("custom_volume_backup") { - return nil, fmt.Errorf(`The server is missing the required "custom_volume_backup" API extension`) - } - - if args.Name != "" && !r.HasExtension("backup_override_name") { - return nil, fmt.Errorf(`The server is missing the required "backup_override_name" API extension`) - } - - path := fmt.Sprintf("/storage-pools/%s/volumes/custom", url.PathEscape(pool)) - - // Prepare the HTTP request. - reqURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0%s", r.httpBaseURL.String(), path)) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", reqURL, args.BackupFile) - if err != nil { - return nil, err - } - - req.Header.Set("Content-Type", "application/octet-stream") - - if args.Name != "" { - req.Header.Set("X-LXD-name", args.Name) - } - - // Send the request. - resp, err := r.DoHTTP(req) - if err != nil { - return nil, err - } - - defer func() { _ = resp.Body.Close() }() - - // Handle errors. - response, _, err := lxdParseResponse(resp) - if err != nil { - return nil, err - } - - // Get to the operation. - respOperation, err := response.MetadataAsOperation() - if err != nil { - return nil, err - } - - // Setup an Operation wrapper. 
- op := operation{ - Operation: *respOperation, - r: r, - chActive: make(chan bool), - } - - return &op, nil -} diff --git a/vendor/github.com/lxc/lxd/client/lxd_warnings.go b/vendor/github.com/lxc/lxd/client/lxd_warnings.go deleted file mode 100644 index d4f0b70a..00000000 --- a/vendor/github.com/lxc/lxd/client/lxd_warnings.go +++ /dev/null @@ -1,90 +0,0 @@ -package lxd - -import ( - "fmt" - "net/url" - - "github.com/lxc/lxd/shared/api" -) - -// Warning handling functions - -// GetWarningUUIDs returns a list of operation uuids. -func (r *ProtocolLXD) GetWarningUUIDs() ([]string, error) { - if !r.HasExtension("warnings") { - return nil, fmt.Errorf("The server is missing the required \"warnings\" API extension") - } - - // Fetch the raw values. - urls := []string{} - baseURL := "/warnings" - _, err := r.queryStruct("GET", baseURL, nil, "", &urls) - if err != nil { - return nil, err - } - - // Parse it. - return urlsToResourceNames(baseURL, urls...) -} - -// GetWarnings returns a list of warnings. -func (r *ProtocolLXD) GetWarnings() ([]api.Warning, error) { - if !r.HasExtension("warnings") { - return nil, fmt.Errorf("The server is missing the required \"warnings\" API extension") - } - - warnings := []api.Warning{} - - _, err := r.queryStruct("GET", "/warnings?recursion=1", nil, "", &warnings) - if err != nil { - return nil, err - } - - return warnings, nil -} - -// GetWarning returns the warning with the given UUID. -func (r *ProtocolLXD) GetWarning(UUID string) (*api.Warning, string, error) { - if !r.HasExtension("warnings") { - return nil, "", fmt.Errorf("The server is missing the required \"warnings\" API extension") - } - - warning := api.Warning{} - - etag, err := r.queryStruct("GET", fmt.Sprintf("/warnings/%s", url.PathEscape(UUID)), nil, "", &warning) - if err != nil { - return nil, "", err - } - - return &warning, etag, nil -} - -// UpdateWarning updates the warning with the given UUID. 
-func (r *ProtocolLXD) UpdateWarning(UUID string, warning api.WarningPut, ETag string) error { - if !r.HasExtension("warnings") { - return fmt.Errorf("The server is missing the required \"warnings\" API extension") - } - - // Send the request - _, _, err := r.query("PUT", fmt.Sprintf("/warnings/%s", url.PathEscape(UUID)), warning, "") - if err != nil { - return err - } - - return nil -} - -// DeleteWarning deletes the provided warning. -func (r *ProtocolLXD) DeleteWarning(UUID string) error { - if !r.HasExtension("warnings") { - return fmt.Errorf("The server is missing the required \"warnings\" API extension") - } - - // Send the request - _, _, err := r.query("DELETE", fmt.Sprintf("/warnings/%s", url.PathEscape(UUID)), nil, "") - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/client/operations.go b/vendor/github.com/lxc/lxd/client/operations.go deleted file mode 100644 index b38a3e9d..00000000 --- a/vendor/github.com/lxc/lxd/client/operations.go +++ /dev/null @@ -1,340 +0,0 @@ -package lxd - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "sync" - - "github.com/gorilla/websocket" - - "github.com/lxc/lxd/shared/api" -) - -// The Operation type represents an ongoing LXD operation (asynchronous processing). -type operation struct { - api.Operation - - r *ProtocolLXD - listener *EventListener - handlerReady bool - handlerLock sync.Mutex - - chActive chan bool -} - -// AddHandler adds a function to be called whenever an event is received. 
-func (op *operation) AddHandler(function func(api.Operation)) (*EventTarget, error) { - // Make sure we have a listener setup - err := op.setupListener() - if err != nil { - return nil, err - } - - // Make sure we're not racing with ourselves - op.handlerLock.Lock() - defer op.handlerLock.Unlock() - - // If we're done already, just return - if op.StatusCode.IsFinal() { - return nil, nil - } - - // Wrap the function to filter unwanted messages - wrapped := func(event api.Event) { - op.handlerLock.Lock() - - newOp := api.Operation{} - err := json.Unmarshal(event.Metadata, &newOp) - if err != nil || newOp.ID != op.ID { - op.handlerLock.Unlock() - return - } - - op.handlerLock.Unlock() - - function(newOp) - } - - return op.listener.AddHandler([]string{"operation"}, wrapped) -} - -// Cancel will request that LXD cancels the operation (if supported). -func (op *operation) Cancel() error { - return op.r.DeleteOperation(op.ID) -} - -// Get returns the API operation struct. -func (op *operation) Get() api.Operation { - return op.Operation -} - -// GetWebsocket returns a raw websocket connection from the operation. -func (op *operation) GetWebsocket(secret string) (*websocket.Conn, error) { - return op.r.GetOperationWebsocket(op.ID, secret) -} - -// RemoveHandler removes a function to be called whenever an event is received. -func (op *operation) RemoveHandler(target *EventTarget) error { - // Make sure we're not racing with ourselves - op.handlerLock.Lock() - defer op.handlerLock.Unlock() - - // If the listener is gone, just return - if op.listener == nil { - return nil - } - - return op.listener.RemoveHandler(target) -} - -// Refresh pulls the current version of the operation and updates the struct. 
-func (op *operation) Refresh() error { - // Get the current version of the operation - newOp, _, err := op.r.GetOperation(op.ID) - if err != nil { - return err - } - - // Update the operation struct - op.Operation = *newOp - - return nil -} - -// Wait lets you wait until the operation reaches a final state. -func (op *operation) Wait() error { - return op.WaitContext(context.Background()) -} - -// WaitContext lets you wait until the operation reaches a final state with context.Context. -func (op *operation) WaitContext(ctx context.Context) error { - op.handlerLock.Lock() - // Check if not done already - if op.StatusCode.IsFinal() { - if op.Err != "" { - op.handlerLock.Unlock() - return errors.New(op.Err) - } - - op.handlerLock.Unlock() - return nil - } - - op.handlerLock.Unlock() - - // Make sure we have a listener setup - err := op.setupListener() - if err != nil { - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - case <-op.chActive: - } - - // We're done, parse the result - if op.Err != "" { - return errors.New(op.Err) - } - - return nil -} - -func (op *operation) setupListener() error { - // Make sure we're not racing with ourselves - op.handlerLock.Lock() - defer op.handlerLock.Unlock() - - // We already have a listener setup - if op.handlerReady { - return nil - } - - op.handlerReady = true - - // Get a new listener - if op.listener == nil { - listener, err := op.r.GetEvents() - if err != nil { - return err - } - - op.listener = listener - } - - // Setup the handler - chReady := make(chan bool) - _, err := op.listener.AddHandler([]string{"operation"}, func(event api.Event) { - <-chReady - - // We don't want concurrency while processing events - op.handlerLock.Lock() - defer op.handlerLock.Unlock() - - // Check if we're done already (because of another event) - if op.listener == nil { - return - } - - // Get an operation struct out of this data - newOp := api.Operation{} - err := json.Unmarshal(event.Metadata, &newOp) - if err != nil || 
newOp.ID != op.ID { - return - } - - // Update the struct - op.Operation = newOp - - // And check if we're done - if op.StatusCode.IsFinal() { - op.listener.Disconnect() - op.listener = nil - close(op.chActive) - return - } - }) - if err != nil { - op.listener.Disconnect() - op.listener = nil - close(op.chActive) - close(chReady) - - return err - } - - // Monitor event listener - go func() { - <-chReady - - // We don't want concurrency while accessing the listener - op.handlerLock.Lock() - - // Check if we're done already (because of another event) - listener := op.listener - if listener == nil { - op.handlerLock.Unlock() - return - } - - op.handlerLock.Unlock() - - // Wait for the listener or operation to be done - select { - case <-listener.ctx.Done(): - op.handlerLock.Lock() - if op.listener != nil { - op.Err = listener.err.Error() - close(op.chActive) - } - - op.handlerLock.Unlock() - case <-op.chActive: - return - } - }() - - // And do a manual refresh to avoid races - err = op.Refresh() - if err != nil { - op.listener.Disconnect() - op.listener = nil - close(op.chActive) - close(chReady) - - return err - } - - // Check if not done already - if op.StatusCode.IsFinal() { - op.listener.Disconnect() - op.listener = nil - close(op.chActive) - close(chReady) - - if op.Err != "" { - return errors.New(op.Err) - } - - return nil - } - - // Start processing background updates - close(chReady) - - return nil -} - -// The remoteOperation type represents an ongoing LXD operation between two servers. -type remoteOperation struct { - targetOp Operation - - handlers []func(api.Operation) - handlerLock sync.Mutex - - chDone chan bool - chPost chan bool - err error -} - -// AddHandler adds a function to be called whenever an event is received. 
-func (op *remoteOperation) AddHandler(function func(api.Operation)) (*EventTarget, error) { - var err error - var target *EventTarget - - op.handlerLock.Lock() - defer op.handlerLock.Unlock() - - // Attach to the existing target operation - if op.targetOp != nil { - target, err = op.targetOp.AddHandler(function) - if err != nil { - return nil, err - } - } else { - // Generate a mock EventTarget - target = &EventTarget{ - function: func(api.Event) { function(api.Operation{}) }, - types: []string{"operation"}, - } - } - - // Add the handler to our list - op.handlers = append(op.handlers, function) - - return target, nil -} - -// CancelTarget attempts to cancel the target operation. -func (op *remoteOperation) CancelTarget() error { - if op.targetOp == nil { - return fmt.Errorf("No associated target operation") - } - - return op.targetOp.Cancel() -} - -// GetTarget returns the target operation. -func (op *remoteOperation) GetTarget() (*api.Operation, error) { - if op.targetOp == nil { - return nil, fmt.Errorf("No associated target operation") - } - - opAPI := op.targetOp.Get() - return &opAPI, nil -} - -// Wait lets you wait until the operation reaches a final state. -func (op *remoteOperation) Wait() error { - <-op.chDone - - if op.chPost != nil { - <-op.chPost - } - - return op.err -} diff --git a/vendor/github.com/lxc/lxd/client/simplestreams.go b/vendor/github.com/lxc/lxd/client/simplestreams.go deleted file mode 100644 index 1cf5b6df..00000000 --- a/vendor/github.com/lxc/lxd/client/simplestreams.go +++ /dev/null @@ -1,52 +0,0 @@ -package lxd - -import ( - "fmt" - "net/http" - - "github.com/lxc/lxd/shared/simplestreams" -) - -// ProtocolSimpleStreams implements a SimpleStreams API client. -type ProtocolSimpleStreams struct { - ssClient *simplestreams.SimpleStreams - - http *http.Client - httpHost string - httpUserAgent string - httpCertificate string -} - -// Disconnect is a no-op for simplestreams. 
-func (r *ProtocolSimpleStreams) Disconnect() { -} - -// GetConnectionInfo returns the basic connection information used to interact with the server. -func (r *ProtocolSimpleStreams) GetConnectionInfo() (*ConnectionInfo, error) { - info := ConnectionInfo{} - info.Addresses = []string{r.httpHost} - info.Certificate = r.httpCertificate - info.Protocol = "simplestreams" - info.URL = r.httpHost - - return &info, nil -} - -// GetHTTPClient returns the http client used for the connection. This can be used to set custom http options. -func (r *ProtocolSimpleStreams) GetHTTPClient() (*http.Client, error) { - if r.http == nil { - return nil, fmt.Errorf("HTTP client isn't set, bad connection") - } - - return r.http, nil -} - -// DoHTTP performs a Request, using macaroon authentication if set. -func (r *ProtocolSimpleStreams) DoHTTP(req *http.Request) (*http.Response, error) { - // Set the user agent - if r.httpUserAgent != "" { - req.Header.Set("User-Agent", r.httpUserAgent) - } - - return r.http.Do(req) -} diff --git a/vendor/github.com/lxc/lxd/client/simplestreams_images.go b/vendor/github.com/lxc/lxd/client/simplestreams_images.go deleted file mode 100644 index 448a450d..00000000 --- a/vendor/github.com/lxc/lxd/client/simplestreams_images.go +++ /dev/null @@ -1,302 +0,0 @@ -package lxd - -import ( - "context" - "crypto/sha256" - "fmt" - "io" - "net/http" - "net/url" - "os" - "os/exec" - "strings" - "time" - - "github.com/lxc/lxd/shared" - "github.com/lxc/lxd/shared/api" -) - -// Image handling functions - -// GetImages returns a list of available images as Image structs. -func (r *ProtocolSimpleStreams) GetImages() ([]api.Image, error) { - return r.ssClient.ListImages() -} - -// GetImageFingerprints returns a list of available image fingerprints. 
-func (r *ProtocolSimpleStreams) GetImageFingerprints() ([]string, error) { - // Get all the images from simplestreams - images, err := r.ssClient.ListImages() - if err != nil { - return nil, err - } - - // And now extract just the fingerprints - fingerprints := []string{} - for _, img := range images { - fingerprints = append(fingerprints, img.Fingerprint) - } - - return fingerprints, nil -} - -// GetImagesWithFilter returns a filtered list of available images as Image structs. -func (r *ProtocolSimpleStreams) GetImagesWithFilter(filters []string) ([]api.Image, error) { - return nil, fmt.Errorf("GetImagesWithFilter is not supported by the simplestreams protocol") -} - -// GetImage returns an Image struct for the provided fingerprint. -func (r *ProtocolSimpleStreams) GetImage(fingerprint string) (*api.Image, string, error) { - image, err := r.ssClient.GetImage(fingerprint) - if err != nil { - return nil, "", fmt.Errorf("Failed getting image: %w", err) - } - - return image, "", err -} - -// GetImageFile downloads an image from the server, returning an ImageFileResponse struct. -func (r *ProtocolSimpleStreams) GetImageFile(fingerprint string, req ImageFileRequest) (*ImageFileResponse, error) { - // Quick checks. - if req.MetaFile == nil && req.RootfsFile == nil { - return nil, fmt.Errorf("No file requested") - } - - // Attempt to download from host - if shared.PathExists("/dev/lxd/sock") && os.Geteuid() == 0 { - unixURI := fmt.Sprintf("http://unix.socket/1.0/images/%s/export", url.PathEscape(fingerprint)) - - // Setup the HTTP client - devlxdHTTP, err := unixHTTPClient(nil, "/dev/lxd/sock") - if err == nil { - resp, err := lxdDownloadImage(fingerprint, unixURI, r.httpUserAgent, devlxdHTTP, req) - if err == nil { - return resp, nil - } - } - } - - // Use relatively short response header timeout so as not to hold the image lock open too long. - // Deference client and transport in order to clone them so as to not modify timeout of base client. 
- httpClient := *r.http - httpTransport := httpClient.Transport.(*http.Transport).Clone() - httpTransport.ResponseHeaderTimeout = 30 * time.Second - httpClient.Transport = httpTransport - - // Get the file list - files, err := r.ssClient.GetFiles(fingerprint) - if err != nil { - return nil, err - } - - // Prepare the response - resp := ImageFileResponse{} - - // Download function - download := func(path string, filename string, hash string, target io.WriteSeeker) (int64, error) { - // Try over http - url, err := shared.JoinUrls(fmt.Sprintf("http://%s", strings.TrimPrefix(r.httpHost, "https://")), path) - if err != nil { - return -1, err - } - - size, err := shared.DownloadFileHash(context.TODO(), &httpClient, r.httpUserAgent, req.ProgressHandler, req.Canceler, filename, url, hash, sha256.New(), target) - if err != nil { - // Handle cancelation - if err.Error() == "net/http: request canceled" { - return -1, err - } - - // Try over https - url, err := shared.JoinUrls(r.httpHost, path) - if err != nil { - return -1, err - } - - size, err = shared.DownloadFileHash(context.TODO(), &httpClient, r.httpUserAgent, req.ProgressHandler, req.Canceler, filename, url, hash, sha256.New(), target) - if err != nil { - return -1, err - } - } - - return size, nil - } - - // Download the LXD image file - meta, ok := files["meta"] - if ok && req.MetaFile != nil { - size, err := download(meta.Path, "metadata", meta.Sha256, req.MetaFile) - if err != nil { - return nil, err - } - - parts := strings.Split(meta.Path, "/") - resp.MetaName = parts[len(parts)-1] - resp.MetaSize = size - } - - // Download the rootfs - rootfs, ok := files["root"] - if ok && req.RootfsFile != nil { - // Look for deltas (requires xdelta3) - downloaded := false - _, err := exec.LookPath("xdelta3") - if err == nil && req.DeltaSourceRetriever != nil { - for filename, file := range files { - if !strings.HasPrefix(filename, "root.delta-") { - continue - } - - // Check if we have the source file for the delta - 
srcFingerprint := strings.Split(filename, "root.delta-")[1] - srcPath := req.DeltaSourceRetriever(srcFingerprint, "rootfs") - if srcPath == "" { - continue - } - - // Create temporary file for the delta - deltaFile, err := os.CreateTemp("", "lxc_image_") - if err != nil { - return nil, err - } - - defer func() { _ = deltaFile.Close() }() - - defer func() { _ = os.Remove(deltaFile.Name()) }() - - // Download the delta - _, err = download(file.Path, "rootfs delta", file.Sha256, deltaFile) - if err != nil { - return nil, err - } - - // Create temporary file for the delta - patchedFile, err := os.CreateTemp("", "lxc_image_") - if err != nil { - return nil, err - } - - defer func() { _ = patchedFile.Close() }() - - defer func() { _ = os.Remove(patchedFile.Name()) }() - - // Apply it - _, err = shared.RunCommand("xdelta3", "-f", "-d", "-s", srcPath, deltaFile.Name(), patchedFile.Name()) - if err != nil { - return nil, err - } - - // Copy to the target - size, err := io.Copy(req.RootfsFile, patchedFile) - if err != nil { - return nil, err - } - - parts := strings.Split(rootfs.Path, "/") - resp.RootfsName = parts[len(parts)-1] - resp.RootfsSize = size - downloaded = true - } - } - - // Download the whole file - if !downloaded { - size, err := download(rootfs.Path, "rootfs", rootfs.Sha256, req.RootfsFile) - if err != nil { - return nil, err - } - - parts := strings.Split(rootfs.Path, "/") - resp.RootfsName = parts[len(parts)-1] - resp.RootfsSize = size - } - } - - return &resp, nil -} - -// GetImageSecret isn't relevant for the simplestreams protocol. -func (r *ProtocolSimpleStreams) GetImageSecret(fingerprint string) (string, error) { - return "", fmt.Errorf("Private images aren't supported by the simplestreams protocol") -} - -// GetPrivateImage isn't relevant for the simplestreams protocol. 
-func (r *ProtocolSimpleStreams) GetPrivateImage(fingerprint string, secret string) (*api.Image, string, error) { - return nil, "", fmt.Errorf("Private images aren't supported by the simplestreams protocol") -} - -// GetPrivateImageFile isn't relevant for the simplestreams protocol. -func (r *ProtocolSimpleStreams) GetPrivateImageFile(fingerprint string, secret string, req ImageFileRequest) (*ImageFileResponse, error) { - return nil, fmt.Errorf("Private images aren't supported by the simplestreams protocol") -} - -// GetImageAliases returns the list of available aliases as ImageAliasesEntry structs. -func (r *ProtocolSimpleStreams) GetImageAliases() ([]api.ImageAliasesEntry, error) { - return r.ssClient.ListAliases() -} - -// GetImageAliasNames returns the list of available alias names. -func (r *ProtocolSimpleStreams) GetImageAliasNames() ([]string, error) { - // Get all the images from simplestreams - aliases, err := r.ssClient.ListAliases() - if err != nil { - return nil, err - } - - // And now extract just the names - names := []string{} - for _, alias := range aliases { - names = append(names, alias.Name) - } - - return names, nil -} - -// GetImageAlias returns an existing alias as an ImageAliasesEntry struct. -func (r *ProtocolSimpleStreams) GetImageAlias(name string) (*api.ImageAliasesEntry, string, error) { - alias, err := r.ssClient.GetAlias("container", name) - if err != nil { - alias, err = r.ssClient.GetAlias("virtual-machine", name) - if err != nil { - return nil, "", err - } - } - - return alias, "", err -} - -// GetImageAliasType returns an existing alias as an ImageAliasesEntry struct. 
-func (r *ProtocolSimpleStreams) GetImageAliasType(imageType string, name string) (*api.ImageAliasesEntry, string, error) { - if imageType == "" { - return r.GetImageAlias(name) - } - - alias, err := r.ssClient.GetAlias(imageType, name) - if err != nil { - return nil, "", err - } - - return alias, "", err -} - -// GetImageAliasArchitectures returns a map of architectures / targets. -func (r *ProtocolSimpleStreams) GetImageAliasArchitectures(imageType string, name string) (map[string]*api.ImageAliasesEntry, error) { - if imageType == "" { - aliases, err := r.ssClient.GetAliasArchitectures("container", name) - if err != nil { - aliases, err = r.ssClient.GetAliasArchitectures("virtual-machine", name) - if err != nil { - return nil, err - } - } - - return aliases, nil - } - - return r.ssClient.GetAliasArchitectures(imageType, name) -} - -// ExportImage exports (copies) an image to a remote server. -func (r *ProtocolSimpleStreams) ExportImage(fingerprint string, image api.ImageExportPost) (Operation, error) { - return nil, fmt.Errorf("Exporting images is not supported by the simplestreams protocol") -} diff --git a/vendor/github.com/lxc/lxd/client/util.go b/vendor/github.com/lxc/lxd/client/util.go deleted file mode 100644 index 1816ec85..00000000 --- a/vendor/github.com/lxc/lxd/client/util.go +++ /dev/null @@ -1,243 +0,0 @@ -package lxd - -import ( - "context" - "crypto/tls" - "fmt" - "net" - "net/http" - "net/url" - "strings" - "time" - - "github.com/lxc/lxd/shared" -) - -func tlsHTTPClient(client *http.Client, tlsClientCert string, tlsClientKey string, tlsCA string, tlsServerCert string, insecureSkipVerify bool, proxy func(req *http.Request) (*url.URL, error), transportWrapper func(t *http.Transport) HTTPTransporter) (*http.Client, error) { - // Get the TLS configuration - tlsConfig, err := shared.GetTLSConfigMem(tlsClientCert, tlsClientKey, tlsCA, tlsServerCert, insecureSkipVerify) - if err != nil { - return nil, err - } - - // Define the http transport - transport 
:= &http.Transport{ - TLSClientConfig: tlsConfig, - Proxy: shared.ProxyFromEnvironment, - DisableKeepAlives: true, - ExpectContinueTimeout: time.Second * 30, - ResponseHeaderTimeout: time.Second * 3600, - TLSHandshakeTimeout: time.Second * 5, - } - - // Allow overriding the proxy - if proxy != nil { - transport.Proxy = proxy - } - - // Special TLS handling - transport.DialTLSContext = func(ctx context.Context, network string, addr string) (net.Conn, error) { - tlsDial := func(network string, addr string, config *tls.Config, resetName bool) (net.Conn, error) { - conn, err := shared.RFC3493Dialer(ctx, network, addr) - if err != nil { - return nil, err - } - - // Setup TLS - if resetName { - hostName, _, err := net.SplitHostPort(addr) - if err != nil { - hostName = addr - } - - config = config.Clone() - config.ServerName = hostName - } - - tlsConn := tls.Client(conn, config) - - // Validate the connection - err = tlsConn.Handshake() - if err != nil { - _ = conn.Close() - return nil, err - } - - if !config.InsecureSkipVerify { - err := tlsConn.VerifyHostname(config.ServerName) - if err != nil { - _ = conn.Close() - return nil, err - } - } - - return tlsConn, nil - } - - conn, err := tlsDial(network, addr, transport.TLSClientConfig, false) - if err != nil { - // We may have gotten redirected to a non-LXD machine - return tlsDial(network, addr, transport.TLSClientConfig, true) - } - - return conn, nil - } - - // Define the http client - if client == nil { - client = &http.Client{} - } - - if transportWrapper != nil { - client.Transport = transportWrapper(transport) - } else { - client.Transport = transport - } - - // Setup redirect policy - client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - // Replicate the headers - req.Header = via[len(via)-1].Header - - return nil - } - - return client, nil -} - -func unixHTTPClient(args *ConnectionArgs, path string) (*http.Client, error) { - // Setup a Unix socket dialer - unixDial := func(_ 
context.Context, network, addr string) (net.Conn, error) { - raddr, err := net.ResolveUnixAddr("unix", path) - if err != nil { - return nil, err - } - - return net.DialUnix("unix", nil, raddr) - } - - if args == nil { - args = &ConnectionArgs{} - } - - // Define the http transport - transport := &http.Transport{ - DialContext: unixDial, - DisableKeepAlives: true, - Proxy: args.Proxy, - ExpectContinueTimeout: time.Second * 30, - ResponseHeaderTimeout: time.Second * 3600, - TLSHandshakeTimeout: time.Second * 5, - } - - // Define the http client - client := args.HTTPClient - if client == nil { - client = &http.Client{} - } - - client.Transport = transport - - // Setup redirect policy - client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - // Replicate the headers - req.Header = via[len(via)-1].Header - - return nil - } - - return client, nil -} - -// remoteOperationResult used for storing the error that occurred for a particular remote URL. -type remoteOperationResult struct { - URL string - Error error -} - -func remoteOperationError(msg string, errors []remoteOperationResult) error { - // Check if empty - if len(errors) == 0 { - return nil - } - - // Check if all identical - var err error - for _, entry := range errors { - if err != nil && entry.Error.Error() != err.Error() { - errorStrs := make([]string, 0, len(errors)) - for _, error := range errors { - errorStrs = append(errorStrs, fmt.Sprintf("%s: %v", error.URL, error.Error)) - } - - return fmt.Errorf("%s:\n - %s", msg, strings.Join(errorStrs, "\n - ")) - } - - err = entry.Error - } - - // Check if successful - if err != nil { - return fmt.Errorf("%s: %w", msg, err) - } - - return nil -} - -// Set the value of a query parameter in the given URI. 
-func setQueryParam(uri, param, value string) (string, error) { - fields, err := url.Parse(uri) - if err != nil { - return "", err - } - - values := fields.Query() - values.Set(param, url.QueryEscape(value)) - - fields.RawQuery = values.Encode() - - return fields.String(), nil -} - -// urlsToResourceNames returns a list of resource names extracted from one or more URLs of the same resource type. -// The resource type path prefix to match is provided by the matchPathPrefix argument. -func urlsToResourceNames(matchPathPrefix string, urls ...string) ([]string, error) { - resourceNames := make([]string, 0, len(urls)) - - for _, urlRaw := range urls { - u, err := url.Parse(urlRaw) - if err != nil { - return nil, fmt.Errorf("Failed parsing URL %q: %w", urlRaw, err) - } - - fields := strings.Split(u.Path, fmt.Sprintf("%s/", matchPathPrefix)) - if len(fields) != 2 { - return nil, fmt.Errorf("Unexpected URL path %q", u) - } - - resourceNames = append(resourceNames, fields[len(fields)-1]) - } - - return resourceNames, nil -} - -// parseFilters translates filters passed at client side to form acceptable by server-side API. -func parseFilters(filters []string) string { - var result []string - for _, filter := range filters { - if strings.Contains(filter, "=") { - membs := strings.SplitN(filter, "=", 2) - result = append(result, fmt.Sprintf("%s eq %s", membs[0], membs[1])) - } - } - return strings.Join(result, " and ") -} - -// HTTPTransporter represents a wrapper around *http.Transport. -// It is used to add some pre and postprocessing logic to http requests / responses. 
-type HTTPTransporter interface { - http.RoundTripper - - // Transport what this struct wraps - Transport() *http.Transport -} diff --git a/vendor/github.com/lxc/lxd/lxd/device/config/consts.go b/vendor/github.com/lxc/lxd/lxd/device/config/consts.go deleted file mode 100644 index c81b2279..00000000 --- a/vendor/github.com/lxc/lxd/lxd/device/config/consts.go +++ /dev/null @@ -1,4 +0,0 @@ -package config - -// DefaultVMBlockFilesystemSize is the size of a VM root device block volume's associated filesystem volume. -const DefaultVMBlockFilesystemSize = "100MiB" diff --git a/vendor/github.com/lxc/lxd/lxd/device/config/device_proxyaddress.go b/vendor/github.com/lxc/lxd/lxd/device/config/device_proxyaddress.go deleted file mode 100644 index 422a0eba..00000000 --- a/vendor/github.com/lxc/lxd/lxd/device/config/device_proxyaddress.go +++ /dev/null @@ -1,9 +0,0 @@ -package config - -// ProxyAddress represents a proxy address configuration. -type ProxyAddress struct { - ConnType string - Abstract bool - Address string - Ports []uint64 -} diff --git a/vendor/github.com/lxc/lxd/lxd/device/config/device_runconfig.go b/vendor/github.com/lxc/lxd/lxd/device/config/device_runconfig.go deleted file mode 100644 index d0ca7c18..00000000 --- a/vendor/github.com/lxc/lxd/lxd/device/config/device_runconfig.go +++ /dev/null @@ -1,70 +0,0 @@ -package config - -import ( - "github.com/lxc/lxd/lxd/revert" -) - -// MountOwnerShiftNone do not use owner shifting. -const MountOwnerShiftNone = "" - -// MountOwnerShiftDynamic use shiftfs for dynamic owner shifting. -const MountOwnerShiftDynamic = "dynamic" - -// MountOwnerShiftStatic statically modify ownership. -const MountOwnerShiftStatic = "static" - -// RunConfigItem represents a single config item. -type RunConfigItem struct { - Key string - Value string -} - -// MountEntryItem represents a single mount entry item. -type MountEntryItem struct { - DevName string // The internal name for the device. 
- DevPath string // Describes the block special device or remote filesystem to be mounted. - TargetPath string // Describes the mount point (target) for the filesystem. - FSType string // Describes the type of the filesystem. - Opts []string // Describes the mount options associated with the filesystem. - Freq int // Used by dump(8) to determine which filesystems need to be dumped. Defaults to zero (don't dump) if not present. - PassNo int // Used by fsck(8) to determine the order in which filesystem checks are done at boot time. Defaults to zero (don't fsck) if not present. - OwnerShift string // Ownership shifting mode, use constants MountOwnerShiftNone, MountOwnerShiftStatic or MountOwnerShiftDynamic. -} - -// RootFSEntryItem represents the root filesystem options for an Instance. -type RootFSEntryItem struct { - Path string // Describes the root file system source. - Opts []string // Describes the mount options associated with the filesystem. -} - -// USBDeviceItem represents a single USB device matched from LXD USB device specification. -type USBDeviceItem struct { - DeviceName string - HostDevicePath string -} - -// RunConfig represents LXD defined run-time config used for device setup/cleanup. -type RunConfig struct { - RootFS RootFSEntryItem // RootFS to setup. - NetworkInterface []RunConfigItem // Network interface configuration settings. - CGroups []RunConfigItem // Cgroup rules to setup. - Mounts []MountEntryItem // Mounts to setup/remove. - Uevents [][]string // Uevents to inject. - PostHooks []func() error // Functions to be run after device attach/detach. - GPUDevice []RunConfigItem // GPU device configuration settings. - USBDevice []USBDeviceItem // USB device configuration settings. - TPMDevice []RunConfigItem // TPM device configuration settings. - PCIDevice []RunConfigItem // PCI device configuration settings. - Revert revert.Hook // Revert setup of device on post-setup error. 
-} - -// NICConfigDir shared constant used to indicate where NIC config is stored. -const NICConfigDir = "nics" - -// NICConfig contains network interface configuration to be passed into a VM and applied by the agent. -type NICConfig struct { - DeviceName string `json:"device_name"` - NICName string `json:"nic_name"` - MACAddress string `json:"mac_address"` - MTU uint32 `json:"mtu"` -} diff --git a/vendor/github.com/lxc/lxd/lxd/device/config/devices.go b/vendor/github.com/lxc/lxd/lxd/device/config/devices.go deleted file mode 100644 index e7bb6404..00000000 --- a/vendor/github.com/lxc/lxd/lxd/device/config/devices.go +++ /dev/null @@ -1,203 +0,0 @@ -package config - -import ( - "fmt" - "sort" - "strings" -) - -// Device represents a LXD container device. -type Device map[string]string - -// Clone returns a copy of the Device. -func (device Device) Clone() Device { - copy := make(map[string]string, len(device)) - - for k, v := range device { - copy[k] = v - } - - return copy -} - -// Validate accepts a map of field/validation functions to run against the device's config. -func (device Device) Validate(rules map[string]func(value string) error) error { - checkedFields := map[string]struct{}{} - - for k, validator := range rules { - checkedFields[k] = struct{}{} //Mark field as checked. - err := validator(device[k]) - if err != nil { - return fmt.Errorf("Invalid value for device option %q: %w", k, err) - } - } - - // Look for any unchecked fields, as these are unknown fields and validation should fail. - for k := range device { - _, checked := checkedFields[k] - if checked { - continue - } - - // Skip type fields are these are validated by the presence of an implementation. - if k == "type" { - continue - } - - // Allow user.XYZ. 
- if strings.HasPrefix(k, "user.") { - continue - } - - if k == "nictype" && (device["type"] == "nic" || device["type"] == "infiniband") { - continue - } - - if k == "gputype" && device["type"] == "gpu" { - continue - } - - return fmt.Errorf("Invalid device option %q", k) - } - - return nil -} - -// Devices represents a set of LXD container devices. -type Devices map[string]Device - -// NewDevices creates a new Devices set from a native map[string]map[string]string set. -func NewDevices(nativeSet map[string]map[string]string) Devices { - newDevices := Devices{} - - for devName, devConfig := range nativeSet { - newDev := Device{} - for k, v := range devConfig { - newDev[k] = v - } - - newDevices[devName] = newDev - } - - return newDevices -} - -// Contains checks if a given device exists in the set and if it's identical to that provided. -func (list Devices) Contains(k string, d Device) bool { - // If it didn't exist, it's different - if list[k] == nil { - return false - } - - old := list[k] - - return deviceEquals(old, d) -} - -// Update returns the difference between two device sets (removed, added, updated devices) and a list of all -// changed keys across all devices. Accepts a function to return which keys can be live updated, which prevents -// them being removed and re-added if the device supports live updates of certain keys. -func (list Devices) Update(newlist Devices, updateFields func(Device, Device) []string) (map[string]Device, map[string]Device, map[string]Device, []string) { - rmlist := map[string]Device{} - addlist := map[string]Device{} - updatelist := map[string]Device{} - - // Detect which devices have changed or been removed in in new list. - for key, d := range list { - // Always skip user keys. - if strings.HasPrefix(key, "user.") { - continue - } - - if !newlist.Contains(key, d) { - rmlist[key] = d - } - } - - // Detect which devices have changed or been added in in new list. - for key, d := range newlist { - // Always skip user keys. 
- if strings.HasPrefix(key, "user.") { - continue - } - - if !list.Contains(key, d) { - addlist[key] = d - } - } - - allChangedKeys := []string{} - for key, d := range addlist { - srcOldDevice := rmlist[key] - oldDevice := srcOldDevice.Clone() - - srcNewDevice := newlist[key] - newDevice := srcNewDevice.Clone() - - // Detect keys different between old and new device and append to the all changed keys list. - allChangedKeys = append(allChangedKeys, deviceEqualsDiffKeys(oldDevice, newDevice)...) - - // Remove any fields that can be live-updated without adding/removing the device from instance. - if updateFields != nil { - for _, k := range updateFields(oldDevice, newDevice) { - delete(oldDevice, k) - delete(newDevice, k) - } - } - - // If after removing the live-updatable keys the devices are equal, then we know the device has - // been updated rather than added or removed, so add it to the update list, and remove it from - // the added and removed lists. - if deviceEquals(oldDevice, newDevice) { - delete(rmlist, key) - delete(addlist, key) - updatelist[key] = d - } - } - - return rmlist, addlist, updatelist, allChangedKeys -} - -// Clone returns a copy of the Devices set. -func (list Devices) Clone() Devices { - copy := make(Devices, len(list)) - - for deviceName, device := range list { - copy[deviceName] = device.Clone() - } - - return copy -} - -// CloneNative returns a copy of the Devices set as a native map[string]map[string]string type. -func (list Devices) CloneNative() map[string]map[string]string { - copy := make(map[string]map[string]string, len(list)) - - for deviceName, device := range list { - copy[deviceName] = device.Clone() - } - - return copy -} - -// Sorted returns the name of all devices in the set, sorted properly. 
-func (list Devices) Sorted() DevicesSortable { - sortable := DevicesSortable{} - for k, d := range list { - sortable = append(sortable, DeviceNamed{k, d}) - } - - sort.Sort(sortable) - return sortable -} - -// Reversed returns the name of all devices in the set, sorted reversed. -func (list Devices) Reversed() DevicesSortable { - sortable := DevicesSortable{} - for k, d := range list { - sortable = append(sortable, DeviceNamed{k, d}) - } - - sort.Sort(sort.Reverse(sortable)) - return sortable -} diff --git a/vendor/github.com/lxc/lxd/lxd/device/config/devices_sort.go b/vendor/github.com/lxc/lxd/lxd/device/config/devices_sort.go deleted file mode 100644 index bd7d84d1..00000000 --- a/vendor/github.com/lxc/lxd/lxd/device/config/devices_sort.go +++ /dev/null @@ -1,71 +0,0 @@ -package config - -// DeviceNamed contains the name of a device and its config. -type DeviceNamed struct { - Name string - Config Device -} - -// DevicesSortable is a sortable slice of device names and config. -type DevicesSortable []DeviceNamed - -func (devices DevicesSortable) Len() int { - return len(devices) -} - -func (devices DevicesSortable) Less(i, j int) bool { - a := devices[i] - b := devices[j] - - // First sort by types. - if a.Config["type"] != b.Config["type"] { - // In VMs, network interface names are derived from PCI - // location. As a result of that, we must ensure that nic devices will - // always show up at the same spot regardless of what other devices may be - // added. Easiest way to do this is to always have them show up first. - if a.Config["type"] == "nic" { - return true - } - - if b.Config["type"] == "nic" { - return false - } - - // Start disks before other non-nic devices so that any unmounts triggered by deferred resizes - // specified in volatile "apply_quota" key can occur first and the rest of the devices can rely on - // the instance's root disk being mounted. 
- if a.Config["type"] == "disk" { - return true - } - - if b.Config["type"] == "disk" { - return false - } - - // Otherwise start devices of same type together. - return a.Config["type"] > b.Config["type"] - } - - // Start disk devices in path order. - if a.Config["type"] == "disk" && b.Config["type"] == "disk" { - if a.Config["path"] != b.Config["path"] { - // The root device always goes first. - if a.Config["path"] == "/" { - return true - } - - if b.Config["path"] == "/" { - return false - } - - return a.Config["path"] < b.Config["path"] - } - } - - // Fallback to sorting by names. - return a.Name < b.Name -} - -func (devices DevicesSortable) Swap(i, j int) { - devices[i], devices[j] = devices[j], devices[i] -} diff --git a/vendor/github.com/lxc/lxd/lxd/device/config/devices_utils.go b/vendor/github.com/lxc/lxd/lxd/device/config/devices_utils.go deleted file mode 100644 index 3a9a2dc4..00000000 --- a/vendor/github.com/lxc/lxd/lxd/device/config/devices_utils.go +++ /dev/null @@ -1,37 +0,0 @@ -package config - -// deviceEquals checks for any difference and addition/removal of properties. -func deviceEquals(old Device, d Device) bool { - for k := range d { - if d[k] != old[k] { - return false - } - } - - for k := range old { - if d[k] != old[k] { - return false - } - } - - return true -} - -// deviceEqualsDiffKeys checks for any difference and addition/removal of properties and returns a list of changes. 
-func deviceEqualsDiffKeys(old Device, d Device) []string { - keys := []string{} - - for k := range d { - if d[k] != old[k] { - keys = append(keys, k) - } - } - - for k := range old { - if d[k] != old[k] { - keys = append(keys, k) - } - } - - return keys -} diff --git a/vendor/github.com/lxc/lxd/lxd/instance/instancetype/instance_type.go b/vendor/github.com/lxc/lxd/lxd/instance/instancetype/instance_type.go deleted file mode 100644 index 4ddebeaf..00000000 --- a/vendor/github.com/lxc/lxd/lxd/instance/instancetype/instance_type.go +++ /dev/null @@ -1,62 +0,0 @@ -package instancetype - -import ( - "fmt" - - "github.com/lxc/lxd/shared/api" -) - -// Type indicates the type of instance. -type Type int - -const ( - // Any represents any type of instance. - Any = Type(-1) - - // Container represents a container instance type. - Container = Type(0) - - // VM represents a virtual-machine instance type. - VM = Type(1) -) - -// New validates the supplied string against the allowed types of instance and returns the internal -// representation of that type. If empty string is supplied then the type returned is TypeContainer. -// If an invalid name is supplied an error will be returned. -func New(name string) (Type, error) { - // If "container" or "" is supplied, return type as Container. - if api.InstanceType(name) == api.InstanceTypeContainer || name == "" { - return Container, nil - } - - // If "virtual-machine" is supplied, return type as VM. - if api.InstanceType(name) == api.InstanceTypeVM { - return VM, nil - } - - return -1, fmt.Errorf("Invalid instance type") -} - -// String converts the internal representation of instance type to a string used in API requests. -// Returns empty string if value is not a valid instance type. 
-func (instanceType Type) String() string { - if instanceType == Container { - return string(api.InstanceTypeContainer) - } - - if instanceType == VM { - return string(api.InstanceTypeVM) - } - - return "" -} - -// Filter returns a valid filter field compatible with cluster.InstanceFilter. -// 'Any' represents any possible instance type, and so it is omitted. -func (instanceType Type) Filter() *Type { - if instanceType == Any { - return nil - } - - return &instanceType -} diff --git a/vendor/github.com/lxc/lxd/lxd/instance/instancetype/instance_vmagent.go b/vendor/github.com/lxc/lxd/lxd/instance/instancetype/instance_vmagent.go deleted file mode 100644 index 4a5125bb..00000000 --- a/vendor/github.com/lxc/lxd/lxd/instance/instancetype/instance_vmagent.go +++ /dev/null @@ -1,22 +0,0 @@ -package instancetype - -import ( - deviceConfig "github.com/lxc/lxd/lxd/device/config" -) - -// VMAgentMount defines mounts to perform inside VM via agent. -type VMAgentMount struct { - Source string `json:"source"` - Target string `json:"target"` - FSType string `json:"fstype"` - Options []string `json:"options"` -} - -// VMAgentData represents the instance data exposed to the VM agent. -type VMAgentData struct { - Name string `json:"name"` - CloudInitID string `json:"cloud_init_id"` - Location string `json:"location"` - Config map[string]string `json:"config,omitempty"` - Devices map[string]deviceConfig.Device `json:"devices,omitempty"` -} diff --git a/vendor/github.com/lxc/lxd/lxd/revert/revert.go b/vendor/github.com/lxc/lxd/lxd/revert/revert.go deleted file mode 100644 index b86a1e13..00000000 --- a/vendor/github.com/lxc/lxd/lxd/revert/revert.go +++ /dev/null @@ -1,47 +0,0 @@ -package revert - -// Hook is a function that can be added to the revert via the Add() function. -// These will be run in the reverse order that they were added if the reverter's Fail() function is called. -type Hook func() - -// Reverter is a helper type to manage revert functions. 
-type Reverter struct { - revertFuncs []Hook -} - -// New returns a new Reverter. -func New() *Reverter { - return &Reverter{} -} - -// Add adds a revert function to the list to be run when Revert() is called. -func (r *Reverter) Add(f Hook) { - r.revertFuncs = append(r.revertFuncs, f) -} - -// Fail runs any revert functions in the reverse order they were added. -// Should be used with defer or when a task has encountered an error and needs to be reverted. -func (r *Reverter) Fail() { - funcCount := len(r.revertFuncs) - for k := range r.revertFuncs { - // Run the revert functions in reverse order. - k = funcCount - 1 - k - r.revertFuncs[k]() - } -} - -// Success clears the revert functions previously added. -// Should be called on successful completion of a task to prevent revert functions from being run. -func (r *Reverter) Success() { - r.revertFuncs = nil -} - -// Clone returns a copy of the reverter with the current set of revert functions added. -// This can be used if you want to return a reverting function to an external caller but do not want to actually -// execute the previously deferred reverter.Fail() function. -func (r *Reverter) Clone() *Reverter { - rNew := New() - rNew.revertFuncs = append(make([]Hook, 0, len(r.revertFuncs)), r.revertFuncs...) - - return rNew -} diff --git a/vendor/github.com/lxc/lxd/shared/api/certificate.go b/vendor/github.com/lxc/lxd/shared/api/certificate.go deleted file mode 100644 index f6ad0ddd..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/certificate.go +++ /dev/null @@ -1,128 +0,0 @@ -package api - -import ( - "encoding/base64" - "encoding/json" - "time" -) - -// CertificateTypeClient indicates a client certificate type. -const CertificateTypeClient = "client" - -// CertificateTypeServer indicates a server certificate type. -const CertificateTypeServer = "server" - -// CertificateTypeMetrics indicates a metrics certificate type. 
-const CertificateTypeMetrics = "metrics" - -// CertificateTypeUnknown indicates an unknown certificate type. -const CertificateTypeUnknown = "unknown" - -// CertificatesPost represents the fields of a new LXD certificate -// -// swagger:model -type CertificatesPost struct { - CertificatePut `yaml:",inline"` - - // Server trust password (used to add an untrusted client) - // Example: blah - Password string `json:"password" yaml:"password"` - - // Whether to create a certificate add token - // Example: true - // - // API extension: certificate_token - Token bool `json:"token" yaml:"token"` -} - -// CertificatePut represents the modifiable fields of a LXD certificate -// -// swagger:model -// -// API extension: certificate_update. -type CertificatePut struct { - // Name associated with the certificate - // Example: castiana - Name string `json:"name" yaml:"name"` - - // Usage type for the certificate - // Example: client - Type string `json:"type" yaml:"type"` - - // Whether to limit the certificate to listed projects - // Example: true - // - // API extension: certificate_project - Restricted bool `json:"restricted" yaml:"restricted"` - - // List of allowed projects (applies when restricted) - // Example: ["default", "foo", "bar"] - // - // API extension: certificate_project - Projects []string `json:"projects" yaml:"projects"` - - // The certificate itself, as PEM encoded X509 - // Example: X509 PEM certificate - // - // API extension: certificate_self_renewal - Certificate string `json:"certificate" yaml:"certificate"` -} - -// Certificate represents a LXD certificate -// -// swagger:model -type Certificate struct { - CertificatePut `yaml:",inline"` - - // SHA256 fingerprint of the certificate - // Read only: true - // Example: fd200419b271f1dc2a5591b693cc5774b7f234e1ff8c6b78ad703b6888fe2b69 - Fingerprint string `json:"fingerprint" yaml:"fingerprint"` -} - -// Writable converts a full Certificate struct into a CertificatePut struct (filters read-only fields). 
-func (cert *Certificate) Writable() CertificatePut { - return cert.CertificatePut -} - -// URL returns the URL for the certificate. -func (c *Certificate) URL(apiVersion string) *URL { - return NewURL().Path(apiVersion, "certificates", c.Fingerprint) -} - -// CertificateAddToken represents the fields contained within an encoded certificate add token. -// -// swagger:model -// -// API extension: certificate_token. -type CertificateAddToken struct { - // The name of the new client - // Example: user@host - ClientName string `json:"client_name" yaml:"client_name"` - - // The fingerprint of the network certificate - // Example: 57bb0ff4340b5bb28517e062023101adf788c37846dc8b619eb2c3cb4ef29436 - Fingerprint string `json:"fingerprint" yaml:"fingerprint"` - - // The addresses of the server - // Example: ["10.98.30.229:8443"] - Addresses []string `json:"addresses" yaml:"addresses"` - - // The random join secret - // Example: 2b2284d44db32675923fe0d2020477e0e9be11801ff70c435e032b97028c35cd - Secret string `json:"secret" yaml:"secret"` - - // The token's expiry date. - // Example: 2021-03-23T17:38:37.753398689-04:00 - ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"` -} - -// String encodes the certificate add token as JSON and then base64. -func (t *CertificateAddToken) String() string { - joinTokenJSON, err := json.Marshal(t) - if err != nil { - return "" - } - - return base64.StdEncoding.EncodeToString(joinTokenJSON) -} diff --git a/vendor/github.com/lxc/lxd/shared/api/cluster.go b/vendor/github.com/lxc/lxd/shared/api/cluster.go deleted file mode 100644 index 8a266e74..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/cluster.go +++ /dev/null @@ -1,312 +0,0 @@ -package api - -import ( - "encoding/base64" - "encoding/json" - "time" -) - -// Cluster represents high-level information about a LXD cluster. -// -// swagger:model -// -// API extension: clustering. 
-type Cluster struct { - // Name of the cluster member answering the request - // Example: lxd01 - ServerName string `json:"server_name" yaml:"server_name"` - - // Whether clustering is enabled - // Example: true - Enabled bool `json:"enabled" yaml:"enabled"` - - // List of member configuration keys (used during join) - // Example: [] - // - // API extension: clustering_join - MemberConfig []ClusterMemberConfigKey `json:"member_config" yaml:"member_config"` -} - -// ClusterMemberConfigKey represents a single config key that a new member of -// the cluster is required to provide when joining. -// -// The Value field is empty when getting clustering information with GET -// /1.0/cluster, and should be filled by the joining node when performing a PUT -// /1.0/cluster join request. -// -// swagger:model -// -// API extension: clustering_join. -type ClusterMemberConfigKey struct { - // The kind of configuration key (network, storage-pool, ...) - // Example: storage-pool - Entity string `json:"entity" yaml:"entity"` - - // The name of the object requiring this key - // Example: local - Name string `json:"name" yaml:"name"` - - // The name of the key - // Example: source - Key string `json:"key" yaml:"key"` - - // The value on the answering cluster member - // Example: /dev/sdb - Value string `json:"value" yaml:"value"` - - // A human friendly description key - // Example: "source" property for storage pool "local" - Description string `json:"description" yaml:"description"` -} - -// ClusterPut represents the fields required to bootstrap or join a LXD -// cluster. -// -// swagger:model -// -// API extension: clustering. 
-type ClusterPut struct { - Cluster `yaml:",inline"` - - // The address of the cluster you wish to join - // Example: 10.0.0.1:8443 - ClusterAddress string `json:"cluster_address" yaml:"cluster_address"` - - // The expected certificate (X509 PEM encoded) for the cluster - // Example: X509 PEM certificate - ClusterCertificate string `json:"cluster_certificate" yaml:"cluster_certificate"` - - // The local address to use for cluster communication - // Example: 10.0.0.2:8443 - // - // API extension: clustering_join - ServerAddress string `json:"server_address" yaml:"server_address"` - - // The trust password of the cluster you're trying to join - // Example: blah - // - // API extension: clustering_join - ClusterPassword string `json:"cluster_password" yaml:"cluster_password"` -} - -// ClusterMembersPost represents the fields required to request a join token to add a member to the cluster. -// -// swagger:model -// -// API extension: clustering_join_token. -type ClusterMembersPost struct { - // The name of the new cluster member - // Example: lxd02 - ServerName string `json:"server_name" yaml:"server_name"` -} - -// ClusterMemberJoinToken represents the fields contained within an encoded cluster member join token. -// -// swagger:model -// -// API extension: clustering_join_token. -type ClusterMemberJoinToken struct { - // The name of the new cluster member - // Example: lxd02 - ServerName string `json:"server_name" yaml:"server_name"` - - // The fingerprint of the network certificate - // Example: 57bb0ff4340b5bb28517e062023101adf788c37846dc8b619eb2c3cb4ef29436 - Fingerprint string `json:"fingerprint" yaml:"fingerprint"` - - // The addresses of existing online cluster members - // Example: ["10.98.30.229:8443"] - Addresses []string `json:"addresses" yaml:"addresses"` - - // The random join secret. - // Example: 2b2284d44db32675923fe0d2020477e0e9be11801ff70c435e032b97028c35cd - Secret string `json:"secret" yaml:"secret"` - - // The token's expiry date. 
- // Example: 2021-03-23T17:38:37.753398689-04:00 - ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"` -} - -// String encodes the cluster member join token as JSON and then base64. -func (t *ClusterMemberJoinToken) String() string { - joinTokenJSON, err := json.Marshal(t) - if err != nil { - return "" - } - - return base64.StdEncoding.EncodeToString(joinTokenJSON) -} - -// ClusterMemberPost represents the fields required to rename a LXD node. -// -// swagger:model -// -// API extension: clustering. -type ClusterMemberPost struct { - // The new name of the cluster member - // Example: lxd02 - ServerName string `json:"server_name" yaml:"server_name"` -} - -// ClusterMember represents the a LXD node in the cluster. -// -// swagger:model -// -// API extension: clustering. -type ClusterMember struct { - ClusterMemberPut `yaml:",inline"` - - // Name of the cluster member - // Example: lxd01 - ServerName string `json:"server_name" yaml:"server_name"` - - // URL at which the cluster member can be reached - // Example: https://10.0.0.1:8443 - URL string `json:"url" yaml:"url"` - - // Whether the cluster member is a database server - // Example: true - Database bool `json:"database" yaml:"database"` - - // Current status - // Example: Online - Status string `json:"status" yaml:"status"` - - // Additional status information - // Example: fully operational - Message string `json:"message" yaml:"message"` - - // The primary architecture of the cluster member - // Example: x86_64 - // - // API extension: clustering_architecture - Architecture string `json:"architecture" yaml:"architecture"` -} - -// Writable converts a full Profile struct into a ProfilePut struct (filters read-only fields). -func (member *ClusterMember) Writable() ClusterMemberPut { - return member.ClusterMemberPut -} - -// ClusterMemberPut represents the modifiable fields of a LXD cluster member -// -// swagger:model -// -// API extension: clustering_edit_roles. 
-type ClusterMemberPut struct { - // List of roles held by this cluster member - // Example: ["database"] - // - // API extension: clustering_roles - Roles []string `json:"roles" yaml:"roles"` - - // Name of the failure domain for this cluster member - // Example: rack1 - // - // API extension: clustering_failure_domains - FailureDomain string `json:"failure_domain" yaml:"failure_domain"` - - // Cluster member description - // Example: AMD Epyc 32c/64t - // - // API extension: clustering_description - Description string `json:"description" yaml:"description"` - - // Additional configuration information - // Example: {"scheduler.instance": "all"} - // - // API extension: clustering_config - Config map[string]string `json:"config" yaml:"config"` - - // List of cluster groups this member belongs to - // Example: ["group1", "group2"] - // - // API extension: clustering_groups - Groups []string `json:"groups" yaml:"groups"` -} - -// ClusterCertificatePut represents the certificate and key pair for all members in a LXD Cluster -// -// swagger:model -// -// API extension: clustering_update_certs. -type ClusterCertificatePut struct { - // The new certificate (X509 PEM encoded) for the cluster - // Example: X509 PEM certificate - ClusterCertificate string `json:"cluster_certificate" yaml:"cluster_certificate"` - - // The new certificate key (X509 PEM encoded) for the cluster - // Example: X509 PEM certificate key - ClusterCertificateKey string `json:"cluster_certificate_key" yaml:"cluster_certificate_key"` -} - -// ClusterMemberStatePost represents the fields required to evacuate a cluster member. -// -// swagger:model -// -// API extension: clustering_evacuation. -type ClusterMemberStatePost struct { - // The action to be performed. Valid actions are "evacuate" and "restore". - // Example: evacuate - Action string `json:"action" yaml:"action"` - - // Override the configured evacuation mode. 
- // Example: stop - // - // API extension: clustering_evacuate_mode - Mode string `json:"mode" yaml:"mode"` -} - -// ClusterGroupsPost represents the fields available for a new cluster group. -// -// swagger:model -// -// API extension: clustering_groups. -type ClusterGroupsPost struct { - ClusterGroupPut - - // The new name of the cluster group - // Example: group1 - Name string `json:"name" yaml:"name"` -} - -// ClusterGroup represents a cluster group. -// -// swagger:model -// -// API extension: clustering_groups. -type ClusterGroup struct { - ClusterGroupPut `yaml:",inline"` - ClusterGroupPost `yaml:",inline"` -} - -// ClusterGroupPost represents the fields required to rename a cluster group. -// -// swagger:model -// -// API extension: clustering_groups. -type ClusterGroupPost struct { - // The new name of the cluster group - // Example: group1 - Name string `json:"name" yaml:"name"` -} - -// ClusterGroupPut represents the modifiable fields of a cluster group. -// -// swagger:model -// -// API extension: clustering_groups. -type ClusterGroupPut struct { - // The description of the cluster group - // Example: amd64 servers - Description string `json:"description" yaml:"description"` - - // List of members in this group - // Example: ["node1", "node3"] - Members []string `json:"members" yaml:"members"` -} - -// Writable converts a full ClusterGroup struct into a ClusterGroupPut struct (filters read-only fields). -func (c *ClusterGroup) Writable() ClusterGroupPut { - return c.ClusterGroupPut -} diff --git a/vendor/github.com/lxc/lxd/shared/api/cluster_state.go b/vendor/github.com/lxc/lxd/shared/api/cluster_state.go deleted file mode 100644 index 8e3a54d7..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/cluster_state.go +++ /dev/null @@ -1,28 +0,0 @@ -package api - -// ClusterMemberSysInfo represents the sysinfo of a cluster member. -// -// swagger:model -// -// API extension: cluster_member_state. 
-type ClusterMemberSysInfo struct { - Uptime int64 `json:"uptime" yaml:"uptime"` - LoadAverages []float64 `json:"load_averages" yaml:"load_averages"` - TotalRAM uint64 `json:"total_ram" yaml:"total_ram"` - FreeRAM uint64 `json:"free_ram" yaml:"free_ram"` - SharedRAM uint64 `json:"shared_ram" yaml:"shared_ram"` - BufferRAM uint64 `json:"buffered_ram" yaml:"buffered_ram"` - TotalSwap uint64 `json:"total_swap" yaml:"total_swap"` - FreeSwap uint64 `json:"free_swap" yaml:"free_swap"` - Processes uint16 `json:"processes" yaml:"processes"` -} - -// ClusterMemberState represents the state of a cluster member. -// -// swagger:model -// -// API extension: cluster_member_state. -type ClusterMemberState struct { - SysInfo ClusterMemberSysInfo `json:"sysinfo" yaml:"sysinfo"` - StoragePools map[string]StoragePoolState `json:"storage_pools" yaml:"storage_pools"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/container.go b/vendor/github.com/lxc/lxd/shared/api/container.go deleted file mode 100644 index cb7e8d82..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/container.go +++ /dev/null @@ -1,141 +0,0 @@ -package api - -import ( - "time" -) - -// ContainersPost represents the fields available for a new LXD container. -type ContainersPost struct { - ContainerPut `yaml:",inline"` - - Name string `json:"name" yaml:"name"` - Source ContainerSource `json:"source" yaml:"source"` - - InstanceType string `json:"instance_type" yaml:"instance_type"` -} - -// ContainerPost represents the fields required to rename/move a LXD container. 
-type ContainerPost struct { - // Used for renames - Name string `json:"name" yaml:"name"` - - // Used for migration - Migration bool `json:"migration" yaml:"migration"` - - // API extension: container_stateless_copy - Live bool `json:"live" yaml:"live"` - - // API extension: container_only_migration - ContainerOnly bool `json:"container_only" yaml:"container_only"` - - // API extension: container_push_target - Target *ContainerPostTarget `json:"target" yaml:"target"` -} - -// ContainerPostTarget represents the migration target host and operation -// -// API extension: container_push_target. -type ContainerPostTarget struct { - Certificate string `json:"certificate" yaml:"certificate"` - Operation string `json:"operation,omitempty" yaml:"operation,omitempty"` - Websockets map[string]string `json:"secrets,omitempty" yaml:"secrets,omitempty"` -} - -// ContainerPut represents the modifiable fields of a LXD container. -type ContainerPut struct { - Architecture string `json:"architecture" yaml:"architecture"` - Config map[string]string `json:"config" yaml:"config"` - Devices map[string]map[string]string `json:"devices" yaml:"devices"` - Ephemeral bool `json:"ephemeral" yaml:"ephemeral"` - Profiles []string `json:"profiles" yaml:"profiles"` - - // For snapshot restore - Restore string `json:"restore,omitempty" yaml:"restore,omitempty"` - Stateful bool `json:"stateful" yaml:"stateful"` - - // API extension: entity_description - Description string `json:"description" yaml:"description"` -} - -// Container represents a LXD container. 
-type Container struct { - ContainerPut `yaml:",inline"` - - CreatedAt time.Time `json:"created_at" yaml:"created_at"` - ExpandedConfig map[string]string `json:"expanded_config" yaml:"expanded_config"` - ExpandedDevices map[string]map[string]string `json:"expanded_devices" yaml:"expanded_devices"` - Name string `json:"name" yaml:"name"` - Status string `json:"status" yaml:"status"` - StatusCode StatusCode `json:"status_code" yaml:"status_code"` - - // API extension: container_last_used_at - LastUsedAt time.Time `json:"last_used_at" yaml:"last_used_at"` - - // API extension: clustering - Location string `json:"location" yaml:"location"` -} - -// ContainerFull is a combination of Container, ContainerState and CotnainerSnapshot -// -// API extension: container_full. -type ContainerFull struct { - Container `yaml:",inline"` - - Backups []ContainerBackup `json:"backups" yaml:"backups"` - State *ContainerState `json:"state" yaml:"state"` - Snapshots []ContainerSnapshot `json:"snapshots" yaml:"snapshots"` -} - -// Writable converts a full Container struct into a ContainerPut struct (filters read-only fields). -func (c *Container) Writable() ContainerPut { - return c.ContainerPut -} - -// IsActive checks whether the container state indicates the container is active. -func (c Container) IsActive() bool { - switch c.StatusCode { - case Stopped: - return false - case Error: - return false - default: - return true - } -} - -// ContainerSource represents the creation source for a new container. 
-type ContainerSource struct { - Type string `json:"type" yaml:"type"` - Certificate string `json:"certificate" yaml:"certificate"` - - // For "image" type - Alias string `json:"alias,omitempty" yaml:"alias,omitempty"` - Fingerprint string `json:"fingerprint,omitempty" yaml:"fingerprint,omitempty"` - Properties map[string]string `json:"properties,omitempty" yaml:"properties,omitempty"` - Server string `json:"server,omitempty" yaml:"server,omitempty"` - Secret string `json:"secret,omitempty" yaml:"secret,omitempty"` - Protocol string `json:"protocol,omitempty" yaml:"protocol,omitempty"` - - // For "migration" and "copy" types - BaseImage string `json:"base-image,omitempty" yaml:"base-image,omitempty"` - - // For "migration" type - Mode string `json:"mode,omitempty" yaml:"mode,omitempty"` - Operation string `json:"operation,omitempty" yaml:"operation,omitempty"` - Websockets map[string]string `json:"secrets,omitempty" yaml:"secrets,omitempty"` - - // For "copy" type - Source string `json:"source,omitempty" yaml:"source,omitempty"` - - // API extension: container_push - Live bool `json:"live,omitempty" yaml:"live,omitempty"` - - // API extension: container_only_migration - ContainerOnly bool `json:"container_only,omitempty" yaml:"container_only,omitempty"` - - // API extension: container_incremental_copy - Refresh bool `json:"refresh,omitempty" yaml:"refresh,omitempty"` - - // API extension: container_copy_project - Project string `json:"project,omitempty" yaml:"project,omitempty"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/container_backup.go b/vendor/github.com/lxc/lxd/shared/api/container_backup.go deleted file mode 100644 index da40c36f..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/container_backup.go +++ /dev/null @@ -1,31 +0,0 @@ -package api - -import ( - "time" -) - -// ContainerBackupsPost represents the fields available for a new LXD container backup -// API extension: container_backup. 
-type ContainerBackupsPost struct { - Name string `json:"name" yaml:"name"` - ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"` - ContainerOnly bool `json:"container_only" yaml:"container_only"` - OptimizedStorage bool `json:"optimized_storage" yaml:"optimized_storage"` -} - -// ContainerBackup represents a LXD container backup -// API extension: container_backup. -type ContainerBackup struct { - Name string `json:"name" yaml:"name"` - CreatedAt time.Time `json:"created_at" yaml:"created_at"` - ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"` - ContainerOnly bool `json:"container_only" yaml:"container_only"` - OptimizedStorage bool `json:"optimized_storage" yaml:"optimized_storage"` -} - -// ContainerBackupPost represents the fields available for the renaming of a -// container backup -// API extension: container_backup. -type ContainerBackupPost struct { - Name string `json:"name" yaml:"name"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/container_console.go b/vendor/github.com/lxc/lxd/shared/api/container_console.go deleted file mode 100644 index e237b408..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/container_console.go +++ /dev/null @@ -1,17 +0,0 @@ -package api - -// ContainerConsoleControl represents a message on the container console "control" socket -// -// API extension: console. -type ContainerConsoleControl struct { - Command string `json:"command" yaml:"command"` - Args map[string]string `json:"args" yaml:"args"` -} - -// ContainerConsolePost represents a LXD container console request -// -// API extension: console. 
-type ContainerConsolePost struct { - Width int `json:"width" yaml:"width"` - Height int `json:"height" yaml:"height"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/container_exec.go b/vendor/github.com/lxc/lxd/shared/api/container_exec.go deleted file mode 100644 index ca70ff20..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/container_exec.go +++ /dev/null @@ -1,26 +0,0 @@ -package api - -// ContainerExecControl represents a message on the container exec "control" socket. -type ContainerExecControl struct { - Command string `json:"command" yaml:"command"` - Args map[string]string `json:"args" yaml:"args"` - Signal int `json:"signal" yaml:"signal"` -} - -// ContainerExecPost represents a LXD container exec request. -type ContainerExecPost struct { - Command []string `json:"command" yaml:"command"` - WaitForWS bool `json:"wait-for-websocket" yaml:"wait-for-websocket"` - Interactive bool `json:"interactive" yaml:"interactive"` - Environment map[string]string `json:"environment" yaml:"environment"` - Width int `json:"width" yaml:"width"` - Height int `json:"height" yaml:"height"` - - // API extension: container_exec_recording - RecordOutput bool `json:"record-output" yaml:"record-output"` - - // API extension: container_user_group_cwd - User uint32 `json:"user" yaml:"user"` - Group uint32 `json:"group" yaml:"group"` - Cwd string `json:"cwd" yaml:"cwd"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/container_snapshot.go b/vendor/github.com/lxc/lxd/shared/api/container_snapshot.go deleted file mode 100644 index ffa46229..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/container_snapshot.go +++ /dev/null @@ -1,53 +0,0 @@ -package api - -import ( - "time" -) - -// ContainerSnapshotsPost represents the fields available for a new LXD container snapshot. 
-type ContainerSnapshotsPost struct { - Name string `json:"name" yaml:"name"` - Stateful bool `json:"stateful" yaml:"stateful"` - - // API extension: snapshot_expiry_creation - ExpiresAt *time.Time `json:"expires_at" yaml:"expires_at"` -} - -// ContainerSnapshotPost represents the fields required to rename/move a LXD container snapshot. -type ContainerSnapshotPost struct { - Name string `json:"name" yaml:"name"` - Migration bool `json:"migration" yaml:"migration"` - Target *ContainerPostTarget `json:"target" yaml:"target"` - - // API extension: container_snapshot_stateful_migration - Live bool `json:"live,omitempty" yaml:"live,omitempty"` -} - -// ContainerSnapshotPut represents the modifiable fields of a LXD container snapshot -// API extension: snapshot_expiry. -type ContainerSnapshotPut struct { - Architecture string `json:"architecture" yaml:"architecture"` - Config map[string]string `json:"config" yaml:"config"` - Devices map[string]map[string]string `json:"devices" yaml:"devices"` - Ephemeral bool `json:"ephemeral" yaml:"ephemeral"` - Profiles []string `json:"profiles" yaml:"profiles"` - ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"` -} - -// ContainerSnapshot represents a LXD conainer snapshot. -type ContainerSnapshot struct { - ContainerSnapshotPut `yaml:",inline"` - - CreatedAt time.Time `json:"created_at" yaml:"created_at"` - ExpandedConfig map[string]string `json:"expanded_config" yaml:"expanded_config"` - ExpandedDevices map[string]map[string]string `json:"expanded_devices" yaml:"expanded_devices"` - LastUsedAt time.Time `json:"last_used_at" yaml:"last_used_at"` - Name string `json:"name" yaml:"name"` - Stateful bool `json:"stateful" yaml:"stateful"` -} - -// Writable converts a full ContainerSnapshot struct into a ContainerSnapshotPut struct -// (filters read-only fields). 
-func (c *ContainerSnapshot) Writable() ContainerSnapshotPut { - return c.ContainerSnapshotPut -} diff --git a/vendor/github.com/lxc/lxd/shared/api/container_state.go b/vendor/github.com/lxc/lxd/shared/api/container_state.go deleted file mode 100644 index 4a42b600..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/container_state.go +++ /dev/null @@ -1,70 +0,0 @@ -package api - -// ContainerStatePut represents the modifiable fields of a LXD container's state. -type ContainerStatePut struct { - Action string `json:"action" yaml:"action"` - Timeout int `json:"timeout" yaml:"timeout"` - Force bool `json:"force" yaml:"force"` - Stateful bool `json:"stateful" yaml:"stateful"` -} - -// ContainerState represents a LXD container's state. -type ContainerState struct { - Status string `json:"status" yaml:"status"` - StatusCode StatusCode `json:"status_code" yaml:"status_code"` - Disk map[string]ContainerStateDisk `json:"disk" yaml:"disk"` - Memory ContainerStateMemory `json:"memory" yaml:"memory"` - Network map[string]ContainerStateNetwork `json:"network" yaml:"network"` - Pid int64 `json:"pid" yaml:"pid"` - Processes int64 `json:"processes" yaml:"processes"` - - // API extension: container_cpu_time - CPU ContainerStateCPU `json:"cpu" yaml:"cpu"` -} - -// ContainerStateDisk represents the disk information section of a LXD container's state. -type ContainerStateDisk struct { - Usage int64 `json:"usage" yaml:"usage"` -} - -// ContainerStateCPU represents the cpu information section of a LXD container's state -// -// API extension: container_cpu_time. -type ContainerStateCPU struct { - Usage int64 `json:"usage" yaml:"usage"` -} - -// ContainerStateMemory represents the memory information section of a LXD container's state. 
-type ContainerStateMemory struct { - Usage int64 `json:"usage" yaml:"usage"` - UsagePeak int64 `json:"usage_peak" yaml:"usage_peak"` - SwapUsage int64 `json:"swap_usage" yaml:"swap_usage"` - SwapUsagePeak int64 `json:"swap_usage_peak" yaml:"swap_usage_peak"` -} - -// ContainerStateNetwork represents the network information section of a LXD container's state. -type ContainerStateNetwork struct { - Addresses []ContainerStateNetworkAddress `json:"addresses" yaml:"addresses"` - Counters ContainerStateNetworkCounters `json:"counters" yaml:"counters"` - Hwaddr string `json:"hwaddr" yaml:"hwaddr"` - HostName string `json:"host_name" yaml:"host_name"` - Mtu int `json:"mtu" yaml:"mtu"` - State string `json:"state" yaml:"state"` - Type string `json:"type" yaml:"type"` -} - -// ContainerStateNetworkAddress represents a network address as part of the network section of a LXD container's state. -type ContainerStateNetworkAddress struct { - Family string `json:"family" yaml:"family"` - Address string `json:"address" yaml:"address"` - Netmask string `json:"netmask" yaml:"netmask"` - Scope string `json:"scope" yaml:"scope"` -} - -// ContainerStateNetworkCounters represents packet counters as part of the network section of a LXD container's state. -type ContainerStateNetworkCounters struct { - BytesReceived int64 `json:"bytes_received" yaml:"bytes_received"` - BytesSent int64 `json:"bytes_sent" yaml:"bytes_sent"` - PacketsReceived int64 `json:"packets_received" yaml:"packets_received"` - PacketsSent int64 `json:"packets_sent" yaml:"packets_sent"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/devlxd.go b/vendor/github.com/lxc/lxd/shared/api/devlxd.go deleted file mode 100644 index 340d38bd..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/devlxd.go +++ /dev/null @@ -1,25 +0,0 @@ -package api - -// DevLXDPut represents the modifiable data. 
-type DevLXDPut struct { - // Instance state - // Example: Started - State string `json:"state" yaml:"state"` -} - -// DevLXDGet represents the server data which is returned as the root of the devlxd API. -type DevLXDGet struct { - DevLXDPut - - // API version number - // Example: 1.0 - APIVersion string `json:"api_version" yaml:"api_version"` - - // Type (container or virtual-machine) - // Example: container - InstanceType string `json:"instance_type" yaml:"instance_type"` - - // What cluster member this instance is located on - // Example: lxd01 - Location string `json:"location" yaml:"location"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/doc.go b/vendor/github.com/lxc/lxd/shared/api/doc.go deleted file mode 100644 index 663f582e..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Package api contains Go structs for all LXD API objects -// -// # Overview -// -// This package has Go structs for every API object, all the various -// structs are named after the object they represent and some variations of -// those structs exist for initial object creation, object update and -// object retrieval. -// -// A few convenience functions are also tied to those structs which let -// you convert between the various strucs for a given object and also query -// some of the more complex metadata that LXD can export. -package api diff --git a/vendor/github.com/lxc/lxd/shared/api/error.go b/vendor/github.com/lxc/lxd/shared/api/error.go deleted file mode 100644 index 7b608a34..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/error.go +++ /dev/null @@ -1,72 +0,0 @@ -package api - -import ( - "errors" - "fmt" - "net/http" -) - -// StatusErrorf returns a new StatusError containing the specified status and message. -func StatusErrorf(status int, format string, a ...any) StatusError { - var msg string - if len(a) > 0 { - msg = fmt.Sprintf(format, a...) 
- } else { - msg = format - } - - return StatusError{ - status: status, - msg: msg, - } -} - -// StatusError error type that contains an HTTP status code and message. -type StatusError struct { - status int - msg string -} - -// Error returns the error message or the http.StatusText() of the status code if message is empty. -func (e StatusError) Error() string { - if e.msg != "" { - return e.msg - } - - return http.StatusText(e.status) -} - -// Status returns the HTTP status code. -func (e StatusError) Status() int { - return e.status -} - -// StatusErrorMatch checks if err was caused by StatusError. Can optionally also check whether the StatusError's -// status code matches one of the supplied status codes in matchStatus. -// Returns the matched StatusError status code and true if match criteria are met, otherwise false. -func StatusErrorMatch(err error, matchStatusCodes ...int) (int, bool) { - var statusErr StatusError - - if errors.As(err, &statusErr) { - statusCode := statusErr.Status() - - if len(matchStatusCodes) <= 0 { - return statusCode, true - } - - for _, s := range matchStatusCodes { - if statusCode == s { - return statusCode, true - } - } - } - - return -1, false -} - -// StatusErrorCheck returns whether or not err was caused by a StatusError and if it matches one of the -// optional status codes. -func StatusErrorCheck(err error, matchStatusCodes ...int) bool { - _, found := StatusErrorMatch(err, matchStatusCodes...) - return found -} diff --git a/vendor/github.com/lxc/lxd/shared/api/event.go b/vendor/github.com/lxc/lxd/shared/api/event.go deleted file mode 100644 index 98f6c4b9..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/event.go +++ /dev/null @@ -1,164 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "time" -) - -// LXD event types. 
-const ( - EventTypeLifecycle = "lifecycle" - EventTypeLogging = "logging" - EventTypeOperation = "operation" -) - -// Event represents an event entry (over websocket) -// -// swagger:model -type Event struct { - // Event type (one of operation, logging or lifecycle) - // Example: lifecycle - Type string `yaml:"type" json:"type"` - - // Time at which the event was sent - // Example: 2021-02-24T19:00:45.452649098-05:00 - Timestamp time.Time `yaml:"timestamp" json:"timestamp"` - - // JSON encoded metadata (see EventLogging, EventLifecycle or Operation) - // Example: {"action": "instance-started", "source": "/1.0/instances/c1", "context": {}} - Metadata json.RawMessage `yaml:"metadata" json:"metadata"` - - // Originating cluster member - // Example: lxd01 - // - // API extension: event_location - Location string `yaml:"location,omitempty" json:"location,omitempty"` - - // Project the event belongs to. - // Example: default - // - // API extension: event_project - Project string `yaml:"project,omitempty" json:"project,omitempty"` -} - -// ToLogging creates log record for the event. 
-func (event *Event) ToLogging() (EventLogRecord, error) { - if event.Type == EventTypeLogging { - e := &EventLogging{} - err := json.Unmarshal(event.Metadata, &e) - if err != nil { - return EventLogRecord{}, err - } - - ctx := []any{} - for k, v := range e.Context { - ctx = append(ctx, k) - ctx = append(ctx, v) - } - - record := EventLogRecord{ - Time: event.Timestamp, - Lvl: e.Level, - Msg: e.Message, - Ctx: ctx, - } - - return record, nil - } else if event.Type == EventTypeLifecycle { - e := &EventLifecycle{} - err := json.Unmarshal(event.Metadata, &e) - if err != nil { - return EventLogRecord{}, err - } - - ctx := []any{} - for k, v := range e.Context { - ctx = append(ctx, k) - ctx = append(ctx, v) - } - - record := EventLogRecord{ - Time: event.Timestamp, - Lvl: "info", - Ctx: ctx, - } - - if e.Requestor != nil { - requestor := fmt.Sprintf("%s/%s (%s)", e.Requestor.Protocol, e.Requestor.Username, e.Requestor.Address) - record.Msg = fmt.Sprintf("Action: %s, Source: %s, Requestor: %s", e.Action, e.Source, requestor) - } else { - record.Msg = fmt.Sprintf("Action: %s, Source: %s", e.Action, e.Source) - } - - return record, nil - } else if event.Type == EventTypeOperation { - e := &Operation{} - err := json.Unmarshal(event.Metadata, &e) - if err != nil { - return EventLogRecord{}, err - } - - record := EventLogRecord{ - Time: event.Timestamp, - Lvl: "info", - Msg: fmt.Sprintf("ID: %s, Class: %s, Description: %s", e.ID, e.Class, e.Description), - Ctx: []any{ - "CreatedAt", e.CreatedAt, - "UpdatedAt", e.UpdatedAt, - "Status", e.Status, - "StatusCode", e.StatusCode, - "Resources", e.Resources, - "Metadata", e.Metadata, - "MayCancel", e.MayCancel, - "Err", e.Err, - "Location", e.Location, - }, - } - - return record, nil - } - - return EventLogRecord{}, fmt.Errorf("Not supported event type: %s", event.Type) -} - -// EventLogRecord represents single log record. 
-type EventLogRecord struct { - Time time.Time - Lvl string - Msg string - Ctx []any -} - -// EventLogging represents a logging type event entry (admin only). -type EventLogging struct { - Message string `yaml:"message" json:"message"` - Level string `yaml:"level" json:"level"` - Context map[string]string `yaml:"context" json:"context"` -} - -// EventLifecycle represets a lifecycle type event entry -// -// API extension: event_lifecycle. -type EventLifecycle struct { - Action string `yaml:"action" json:"action"` - Source string `yaml:"source" json:"source"` - Context map[string]any `yaml:"context,omitempty" json:"context,omitempty"` - - // API extension: event_lifecycle_requestor - Requestor *EventLifecycleRequestor `yaml:"requestor,omitempty" json:"requestor,omitempty"` -} - -// EventLifecycleRequestor represents the initial requestor for an event -// -// API extension: event_lifecycle_requestor. -type EventLifecycleRequestor struct { - Username string `yaml:"username" json:"username"` - Protocol string `yaml:"protocol" json:"protocol"` - - // Requestor address - // Example: 10.0.2.15 - // - // API extension: event_lifecycle_requestor_address - Address string `yaml:"address" json:"address"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/event_lifecycle.go b/vendor/github.com/lxc/lxd/shared/api/event_lifecycle.go deleted file mode 100644 index 4e3a5fe5..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/event_lifecycle.go +++ /dev/null @@ -1,122 +0,0 @@ -package api - -// Define consts for all the lifecycle events. 
-const ( - EventLifecycleCertificateCreated = "certificate-created" - EventLifecycleCertificateDeleted = "certificate-deleted" - EventLifecycleCertificateUpdated = "certificate-updated" - EventLifecycleClusterCertificateUpdated = "cluster-certificate-updated" - EventLifecycleClusterDisabled = "cluster-disabled" - EventLifecycleClusterEnabled = "cluster-enabled" - EventLifecycleClusterGroupCreated = "cluster-group-created" - EventLifecycleClusterGroupDeleted = "cluster-group-deleted" - EventLifecycleClusterGroupRenamed = "cluster-group-renamed" - EventLifecycleClusterGroupUpdated = "cluster-group-updated" - EventLifecycleClusterMemberAdded = "cluster-member-added" - EventLifecycleClusterMemberRemoved = "cluster-member-removed" - EventLifecycleClusterMemberRenamed = "cluster-member-renamed" - EventLifecycleClusterMemberUpdated = "cluster-member-updated" - EventLifecycleClusterTokenCreated = "cluster-token-created" - EventLifecycleConfigUpdated = "config-updated" - EventLifecycleImageAliasCreated = "image-alias-created" - EventLifecycleImageAliasDeleted = "image-alias-deleted" - EventLifecycleImageAliasRenamed = "image-alias-renamed" - EventLifecycleImageAliasUpdated = "image-alias-updated" - EventLifecycleImageCreated = "image-created" - EventLifecycleImageDeleted = "image-deleted" - EventLifecycleImageRefreshed = "image-refreshed" - EventLifecycleImageRetrieved = "image-retrieved" - EventLifecycleImageSecretCreated = "image-secret-created" - EventLifecycleImageUpdated = "image-updated" - EventLifecycleInstanceBackupCreated = "instance-backup-created" - EventLifecycleInstanceBackupDeleted = "instance-backup-deleted" - EventLifecycleInstanceBackupRenamed = "instance-backup-renamed" - EventLifecycleInstanceBackupRetrieved = "instance-backup-retrieved" - EventLifecycleInstanceConsole = "instance-console" - EventLifecycleInstanceConsoleReset = "instance-console-reset" - EventLifecycleInstanceConsoleRetrieved = "instance-console-retrieved" - EventLifecycleInstanceCreated 
= "instance-created" - EventLifecycleInstanceDeleted = "instance-deleted" - EventLifecycleInstanceExec = "instance-exec" - EventLifecycleInstanceFileDeleted = "instance-file-deleted" - EventLifecycleInstanceFilePushed = "instance-file-pushed" - EventLifecycleInstanceFileRetrieved = "instance-file-retrieved" - EventLifecycleInstanceLogDeleted = "instance-log-deleted" - EventLifecycleInstanceLogRetrieved = "instance-log-retrieved" - EventLifecycleInstanceMetadataRetrieved = "instance-metadata-retrieved" - EventLifecycleInstanceMetadataTemplateCreated = "instance-metadata-template-created" - EventLifecycleInstanceMetadataTemplateDeleted = "instance-metadata-template-deleted" - EventLifecycleInstanceMetadataTemplateRetrieved = "instance-metadata-template-retrieved" - EventLifecycleInstanceMetadataUpdated = "instance-metadata-updated" - EventLifecycleInstancePaused = "instance-paused" - EventLifecycleInstanceReady = "instance-ready" - EventLifecycleInstanceRenamed = "instance-renamed" - EventLifecycleInstanceRestarted = "instance-restarted" - EventLifecycleInstanceRestored = "instance-restored" - EventLifecycleInstanceResumed = "instance-resumed" - EventLifecycleInstanceShutdown = "instance-shutdown" - EventLifecycleInstanceSnapshotCreated = "instance-snapshot-created" - EventLifecycleInstanceSnapshotDeleted = "instance-snapshot-deleted" - EventLifecycleInstanceSnapshotRenamed = "instance-snapshot-renamed" - EventLifecycleInstanceSnapshotUpdated = "instance-snapshot-updated" - EventLifecycleInstanceStarted = "instance-started" - EventLifecycleInstanceStopped = "instance-stopped" - EventLifecycleInstanceUpdated = "instance-updated" - EventLifecycleNetworkACLCreated = "network-acl-created" - EventLifecycleNetworkACLDeleted = "network-acl-deleted" - EventLifecycleNetworkACLRenamed = "network-acl-renamed" - EventLifecycleNetworkACLUpdated = "network-acl-updated" - EventLifecycleNetworkCreated = "network-created" - EventLifecycleNetworkDeleted = "network-deleted" - 
EventLifecycleNetworkForwardCreated = "network-forward-created" - EventLifecycleNetworkForwardDeleted = "network-forward-deleted" - EventLifecycleNetworkForwardUpdated = "network-forward-updated" - EventLifecycleNetworkLoadBalancerCreated = "network-load-balancer-created" - EventLifecycleNetworkLoadBalancerDeleted = "network-load-balancer-deleted" - EventLifecycleNetworkLoadBalancerUpdated = "network-load-balancer-updated" - EventLifecycleNetworkPeerCreated = "network-peer-created" - EventLifecycleNetworkPeerDeleted = "network-peer-deleted" - EventLifecycleNetworkPeerUpdated = "network-peer-updated" - EventLifecycleNetworkRenamed = "network-renamed" - EventLifecycleNetworkUpdated = "network-updated" - EventLifecycleNetworkZoneCreated = "network-zone-created" - EventLifecycleNetworkZoneDeleted = "network-zone-deleted" - EventLifecycleNetworkZoneRecordCreated = "network-zone-record-created" - EventLifecycleNetworkZoneRecordDeleted = "network-zone-record-deleted" - EventLifecycleNetworkZoneRecordUpdated = "network-zone-record-updated" - EventLifecycleNetworkZoneUpdated = "network-zone-updated" - EventLifecycleOperationCancelled = "operation-cancelled" - EventLifecycleProfileCreated = "profile-created" - EventLifecycleProfileDeleted = "profile-deleted" - EventLifecycleProfileRenamed = "profile-renamed" - EventLifecycleProfileUpdated = "profile-updated" - EventLifecycleProjectCreated = "project-created" - EventLifecycleProjectDeleted = "project-deleted" - EventLifecycleProjectRenamed = "project-renamed" - EventLifecycleProjectUpdated = "project-updated" - EventLifecycleStoragePoolCreated = "storage-pool-created" - EventLifecycleStoragePoolDeleted = "storage-pool-deleted" - EventLifecycleStoragePoolUpdated = "storage-pool-updated" - EventLifecycleStorageBucketCreated = "storage-bucket-created" - EventLifecycleStorageBucketUpdated = "storage-bucket-updated" - EventLifecycleStorageBucketDeleted = "storage-bucket-deleted" - EventLifecycleStorageBucketKeyCreated = 
"storage-bucket-key-created" - EventLifecycleStorageBucketKeyUpdated = "storage-bucket-key-updated" - EventLifecycleStorageBucketKeyDeleted = "storage-bucket-key-deleted" - EventLifecycleStorageVolumeCreated = "storage-volume-created" - EventLifecycleStorageVolumeBackupCreated = "storage-volume-backup-created" - EventLifecycleStorageVolumeBackupDeleted = "storage-volume-backup-deleted" - EventLifecycleStorageVolumeBackupRenamed = "storage-volume-backup-renamed" - EventLifecycleStorageVolumeBackupRetrieved = "storage-volume-backup-retrieved" - EventLifecycleStorageVolumeDeleted = "storage-volume-deleted" - EventLifecycleStorageVolumeRenamed = "storage-volume-renamed" - EventLifecycleStorageVolumeRestored = "storage-volume-restored" - EventLifecycleStorageVolumeSnapshotCreated = "storage-volume-snapshot-created" - EventLifecycleStorageVolumeSnapshotDeleted = "storage-volume-snapshot-deleted" - EventLifecycleStorageVolumeSnapshotRenamed = "storage-volume-snapshot-renamed" - EventLifecycleStorageVolumeSnapshotUpdated = "storage-volume-snapshot-updated" - EventLifecycleStorageVolumeUpdated = "storage-volume-updated" - EventLifecycleWarningAcknowledged = "warning-acknowledged" - EventLifecycleWarningDeleted = "warning-deleted" - EventLifecycleWarningReset = "warning-reset" -) diff --git a/vendor/github.com/lxc/lxd/shared/api/image.go b/vendor/github.com/lxc/lxd/shared/api/image.go deleted file mode 100644 index d675378a..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/image.go +++ /dev/null @@ -1,323 +0,0 @@ -package api - -import ( - "time" -) - -// ImageExportPost represents the fields required to export a LXD image -// -// swagger:model -// -// API extension: images_push_relay. 
-type ImageExportPost struct { - // Target server URL - // Example: https://1.2.3.4:8443 - Target string `json:"target" yaml:"target"` - - // Image receive secret - // Example: RANDOM-STRING - Secret string `json:"secret" yaml:"secret"` - - // Remote server certificate - // Example: X509 PEM certificate - Certificate string `json:"certificate" yaml:"certificate"` - - // List of aliases to set on the image - Aliases []ImageAlias `json:"aliases" yaml:"aliases"` - - // Project name - // Example: project1 - // - // API extension: image_target_project - Project string `json:"project" yaml:"project"` - - // List of profiles to use - // Example: ["default"] - // - // API extension: image_copy_profile - Profiles []string `json:"profiles" yaml:"profiles"` -} - -// ImagesPost represents the fields available for a new LXD image -// -// swagger:model -type ImagesPost struct { - ImagePut `yaml:",inline"` - - // Original filename of the image - // Example: lxd.tar.xz - Filename string `json:"filename" yaml:"filename"` - - // Source of the image - Source *ImagesPostSource `json:"source" yaml:"source"` - - // Compression algorithm to use when turning an instance into an image - // Example: gzip - // - // API extension: image_compression_algorithm - CompressionAlgorithm string `json:"compression_algorithm" yaml:"compression_algorithm"` - - // Aliases to add to the image - // Example: [{"name": "foo"}, {"name": "bar"}] - // - // API extension: image_create_aliases - Aliases []ImageAlias `json:"aliases" yaml:"aliases"` -} - -// ImagesPostSource represents the source of a new LXD image -// -// swagger:model -type ImagesPostSource struct { - ImageSource `yaml:",inline"` - - // Transfer mode (push or pull) - // Example: pull - Mode string `json:"mode" yaml:"mode"` - - // Type of image source (instance, snapshot, image or url) - // Example: instance - Type string `json:"type" yaml:"type"` - - // Source URL (for type "url") - // Example: https://some-server.com/some-directory/ - URL 
string `json:"url" yaml:"url"` - - // Instance name (for type "instance" or "snapshot") - // Example: c1/snap0 - Name string `json:"name" yaml:"name"` - - // Source image fingerprint (for type "image") - // Example: 8ae945c52bb2f2df51c923b04022312f99bbb72c356251f54fa89ea7cf1df1d0 - Fingerprint string `json:"fingerprint" yaml:"fingerprint"` - - // Source image server secret token (when downloading private images) - // Example: RANDOM-STRING - Secret string `json:"secret" yaml:"secret"` - - // Source project name - // Example: project1 - // - // API extension: image_source_project - Project string `json:"project" yaml:"project"` -} - -// ImagePut represents the modifiable fields of a LXD image -// -// swagger:model -type ImagePut struct { - // Whether the image should auto-update when a new build is available - // Example: true - AutoUpdate bool `json:"auto_update" yaml:"auto_update"` - - // Descriptive properties - // Example: {"os": "Ubuntu", "release": "jammy", "variant": "cloud"} - Properties map[string]string `json:"properties" yaml:"properties"` - - // Whether the image is available to unauthenticated users - // Example: false - Public bool `json:"public" yaml:"public"` - - // When the image becomes obsolete - // Example: 2025-03-23T20:00:00-04:00 - // - // API extension: images_expiry - ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"` - - // List of profiles to use when creating from this image (if none provided by user) - // Example: ["default"] - // - // API extension: image_profiles - Profiles []string `json:"profiles" yaml:"profiles"` -} - -// Image represents a LXD image -// -// swagger:model -type Image struct { - ImagePut `yaml:",inline"` - - // List of aliases - Aliases []ImageAlias `json:"aliases" yaml:"aliases"` - - // Architecture - // Example: x86_64 - Architecture string `json:"architecture" yaml:"architecture"` - - // Whether the image is an automatically cached remote image - // Example: true - Cached bool `json:"cached" yaml:"cached"` 
- - // Original filename - // Example: 06b86454720d36b20f94e31c6812e05ec51c1b568cf3a8abd273769d213394bb.rootfs - Filename string `json:"filename" yaml:"filename"` - - // Full SHA-256 fingerprint - // Example: 06b86454720d36b20f94e31c6812e05ec51c1b568cf3a8abd273769d213394bb - Fingerprint string `json:"fingerprint" yaml:"fingerprint"` - - // Size of the image in bytes - // Example: 272237676 - Size int64 `json:"size" yaml:"size"` - - // Where the image came from - UpdateSource *ImageSource `json:"update_source,omitempty" yaml:"update_source,omitempty"` - - // Type of image (container or virtual-machine) - // Example: container - // - // API extension: image_types - Type string `json:"type" yaml:"type"` - - // When the image was originally created - // Example: 2021-03-23T20:00:00-04:00 - CreatedAt time.Time `json:"created_at" yaml:"created_at"` - - // Last time the image was used - // Example: 2021-03-22T20:39:00.575185384-04:00 - LastUsedAt time.Time `json:"last_used_at" yaml:"last_used_at"` - - // When the image was added to this LXD server - // Example: 2021-03-24T14:18:15.115036787-04:00 - UploadedAt time.Time `json:"uploaded_at" yaml:"uploaded_at"` -} - -// Writable converts a full Image struct into a ImagePut struct (filters read-only fields). -func (img *Image) Writable() ImagePut { - return img.ImagePut -} - -// URL returns the URL for the image. 
-func (img *Image) URL(apiVersion string, project string) *URL { - return NewURL().Path(apiVersion, "images", img.Fingerprint).Project(project) -} - -// ImageAlias represents an alias from the alias list of a LXD image -// -// swagger:model -type ImageAlias struct { - // Name of the alias - // Example: ubuntu-22.04 - Name string `json:"name" yaml:"name"` - - // Description of the alias - // Example: Our preferred Ubuntu image - Description string `json:"description" yaml:"description"` -} - -// ImageSource represents the source of a LXD image -// -// swagger:model -type ImageSource struct { - // Source alias to download from - // Example: jammy - Alias string `json:"alias" yaml:"alias"` - - // Source server certificate (if not trusted by system CA) - // Example: X509 PEM certificate - Certificate string `json:"certificate" yaml:"certificate"` - - // Source server protocol - // Example: simplestreams - Protocol string `json:"protocol" yaml:"protocol"` - - // URL of the source server - // Example: https://images.linuxcontainers.org - Server string `json:"server" yaml:"server"` - - // Type of image (container or virtual-machine) - // Example: container - // - // API extension: image_types - ImageType string `json:"image_type" yaml:"image_type"` -} - -// ImageAliasesPost represents a new LXD image alias -// -// swagger:model -type ImageAliasesPost struct { - ImageAliasesEntry `yaml:",inline"` -} - -// ImageAliasesEntryPost represents the required fields to rename a LXD image alias -// -// swagger:model -type ImageAliasesEntryPost struct { - // Alias name - // Example: ubuntu-22.04 - Name string `json:"name" yaml:"name"` -} - -// ImageAliasesEntryPut represents the modifiable fields of a LXD image alias -// -// swagger:model -type ImageAliasesEntryPut struct { - // Alias description - // Example: Our preferred Ubuntu image - Description string `json:"description" yaml:"description"` - - // Target fingerprint for the alias - // Example: 
06b86454720d36b20f94e31c6812e05ec51c1b568cf3a8abd273769d213394bb - Target string `json:"target" yaml:"target"` -} - -// ImageAliasesEntry represents a LXD image alias -// -// swagger:model -type ImageAliasesEntry struct { - ImageAliasesEntryPut `yaml:",inline"` - - // Alias name - // Example: ubuntu-22.04 - Name string `json:"name" yaml:"name"` - - // Alias type (container or virtual-machine) - // Example: container - // - // API extension: image_types - Type string `json:"type" yaml:"type"` -} - -// ImageMetadata represents LXD image metadata (used in image tarball) -// -// swagger:model -type ImageMetadata struct { - // Architecture name - // Example: x86_64 - Architecture string `json:"architecture" yaml:"architecture"` - - // Image creation data (as UNIX epoch) - // Example: 1620655439 - CreationDate int64 `json:"creation_date" yaml:"creation_date"` - - // Image expiry data (as UNIX epoch) - // Example: 1620685757 - ExpiryDate int64 `json:"expiry_date" yaml:"expiry_date"` - - // Descriptive properties - // Example: {"os": "Ubuntu", "release": "jammy", "variant": "cloud"} - Properties map[string]string `json:"properties" yaml:"properties"` - - // Template for files in the image - Templates map[string]*ImageMetadataTemplate `json:"templates" yaml:"templates"` -} - -// ImageMetadataTemplate represents a template entry in image metadata (used in image tarball) -// -// swagger:model -type ImageMetadataTemplate struct { - // When to trigger the template (create, copy or start) - // Example: create - When []string `json:"when" yaml:"when"` - - // Whether to trigger only if the file is missing - // Example: false - CreateOnly bool `json:"create_only" yaml:"create_only"` - - // The template itself as a valid pongo2 template - // Example: pongo2-template - Template string `json:"template" yaml:"template"` - - // Key/value properties to pass to the template - // Example: {"foo": "bar"} - Properties map[string]string `json:"properties" yaml:"properties"` -} diff --git 
a/vendor/github.com/lxc/lxd/shared/api/init.go b/vendor/github.com/lxc/lxd/shared/api/init.go deleted file mode 100644 index b93f6e93..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/init.go +++ /dev/null @@ -1,66 +0,0 @@ -package api - -// InitPreseed represents initialization configuration that can be supplied to `lxd init`. -// -// swagger:model -// -// API extension: preseed. -type InitPreseed struct { - Node InitLocalPreseed `yaml:",inline"` - Cluster *InitClusterPreseed `json:"cluster" yaml:"cluster"` -} - -// InitLocalPreseed represents initialization configuration for the local LXD. -// -// swagger:model -// -// API extension: preseed. -type InitLocalPreseed struct { - ServerPut `yaml:",inline"` - - // Networks by project to add to LXD - // Example: Network on the "default" project - Networks []InitNetworksProjectPost `json:"networks" yaml:"networks"` - - // Storage Pools to add to LXD - // Example: local dir storage pool - StoragePools []StoragePoolsPost `json:"storage_pools" yaml:"storage_pools"` - - // Profiles to add to LXD - // Example: "default" profile with a root disk device - Profiles []ProfilesPost `json:"profiles" yaml:"profiles"` - - // Projects to add to LXD - // Example: "default" project - Projects []ProjectsPost `json:"projects" yaml:"projects"` -} - -// InitNetworksProjectPost represents the fields of a new LXD network along with its associated project. -// -// swagger:model -// -// API extension: preseed. -type InitNetworksProjectPost struct { - NetworksPost `yaml:",inline"` - - // Project in which the network will reside - // Example: "default" - Project string -} - -// InitClusterPreseed represents initialization configuration for the LXD cluster. -// -// swagger:model -// -// API extension: preseed. 
-type InitClusterPreseed struct { - ClusterPut `yaml:",inline"` - - // The path to the cluster certificate - // Example: /tmp/cluster.crt - ClusterCertificatePath string `json:"cluster_certificate_path" yaml:"cluster_certificate_path"` - - // A cluster join token - // Example: BASE64-TOKEN - ClusterToken string `json:"cluster_token" yaml:"cluster_token"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/instance.go b/vendor/github.com/lxc/lxd/shared/api/instance.go deleted file mode 100644 index 47ab0707..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/instance.go +++ /dev/null @@ -1,348 +0,0 @@ -package api - -import ( - "strings" - "time" -) - -// GetParentAndSnapshotName returns the parent name, snapshot name, and whether it actually was a snapshot name. -func GetParentAndSnapshotName(name string) (string, string, bool) { - fields := strings.SplitN(name, "/", 2) - if len(fields) == 1 { - return name, "", false - } - - return fields[0], fields[1], true -} - -// InstanceType represents the type if instance being returned or requested via the API. -type InstanceType string - -// InstanceTypeAny defines the instance type value for requesting any instance type. -const InstanceTypeAny = InstanceType("") - -// InstanceTypeContainer defines the instance type value for a container. -const InstanceTypeContainer = InstanceType("container") - -// InstanceTypeVM defines the instance type value for a virtual-machine. -const InstanceTypeVM = InstanceType("virtual-machine") - -// InstancesPost represents the fields available for a new LXD instance. -// -// swagger:model -// -// API extension: instances. -type InstancesPost struct { - InstancePut `yaml:",inline"` - - // Instance name - // Example: foo - Name string `json:"name" yaml:"name"` - - // Creation source - Source InstanceSource `json:"source" yaml:"source"` - - // Cloud instance type (AWS, GCP, Azure, ...) 
to emulate with limits - // Example: t1.micro - InstanceType string `json:"instance_type" yaml:"instance_type"` - - // Type (container or virtual-machine) - // Example: container - Type InstanceType `json:"type" yaml:"type"` -} - -// InstancesPut represents the fields available for a mass update. -// -// swagger:model -// -// API extension: instance_bulk_state_change. -type InstancesPut struct { - // Desired runtime state - State *InstanceStatePut `json:"state" yaml:"state"` -} - -// InstancePost represents the fields required to rename/move a LXD instance. -// -// swagger:model -// -// API extension: instances. -type InstancePost struct { - // New name for the instance - // Example: bar - Name string `json:"name" yaml:"name"` - - // Whether the instance is being migrated to another server - // Example: false - Migration bool `json:"migration" yaml:"migration"` - - // Whether to perform a live migration (migration only) - // Example: false - Live bool `json:"live" yaml:"live"` - - // Whether snapshots should be discarded (migration only) - // Example: false - InstanceOnly bool `json:"instance_only" yaml:"instance_only"` - - // Whether snapshots should be discarded (migration only, deprecated, use instance_only) - // Example: false - ContainerOnly bool `json:"container_only" yaml:"container_only"` // Deprecated, use InstanceOnly. - - // Target for the migration, will use pull mode if not set (migration only) - Target *InstancePostTarget `json:"target" yaml:"target"` - - // Target pool for local cross-pool move - // Example: baz - // - // API extension: instance_pool_move - Pool string `json:"pool" yaml:"pool"` - - // Target project for local cross-project move - // Example: foo - // - // API extension: instance_project_move - Project string `json:"project" yaml:"project"` - - // AllowInconsistent allow inconsistent copies when migrating. 
- // Example: false - // - // API extension: instance_allow_inconsistent_copy - AllowInconsistent bool `json:"allow_inconsistent" yaml:"allow_inconsistent"` -} - -// InstancePostTarget represents the migration target host and operation. -// -// swagger:model -// -// API extension: instances. -type InstancePostTarget struct { - // The certificate of the migration target - // Example: X509 PEM certificate - Certificate string `json:"certificate" yaml:"certificate"` - - // The operation URL on the remote target - // Example: https://1.2.3.4:8443/1.0/operations/5e8e1638-5345-4c2d-bac9-2c79c8577292 - Operation string `json:"operation,omitempty" yaml:"operation,omitempty"` - - // Migration websockets credentials - // Example: {"migration": "random-string", "criu": "random-string"} - Websockets map[string]string `json:"secrets,omitempty" yaml:"secrets,omitempty"` -} - -// InstancePut represents the modifiable fields of a LXD instance. -// -// swagger:model -// -// API extension: instances. -type InstancePut struct { - // Architecture name - // Example: x86_64 - Architecture string `json:"architecture" yaml:"architecture"` - - // Instance configuration (see doc/instances.md) - // Example: {"security.nesting": "true"} - Config map[string]string `json:"config" yaml:"config"` - - // Instance devices (see doc/instances.md) - // Example: {"root": {"type": "disk", "pool": "default", "path": "/"}} - Devices map[string]map[string]string `json:"devices" yaml:"devices"` - - // Whether the instance is ephemeral (deleted on shutdown) - // Example: false - Ephemeral bool `json:"ephemeral" yaml:"ephemeral"` - - // List of profiles applied to the instance - // Example: ["default"] - Profiles []string `json:"profiles" yaml:"profiles"` - - // If set, instance will be restored to the provided snapshot name - // Example: snap0 - Restore string `json:"restore,omitempty" yaml:"restore,omitempty"` - - // Whether the instance currently has saved state on disk - // Example: false - Stateful bool 
`json:"stateful" yaml:"stateful"` - - // Instance description - // Example: My test instance - Description string `json:"description" yaml:"description"` -} - -// Instance represents a LXD instance. -// -// swagger:model -// -// API extension: instances. -type Instance struct { - InstancePut `yaml:",inline"` - - // Instance creation timestamp - // Example: 2021-03-23T20:00:00-04:00 - CreatedAt time.Time `json:"created_at" yaml:"created_at"` - - // Expanded configuration (all profiles and local config merged) - // Example: {"security.nesting": "true"} - ExpandedConfig map[string]string `json:"expanded_config,omitempty" yaml:"expanded_config,omitempty"` - - // Expanded devices (all profiles and local devices merged) - // Example: {"root": {"type": "disk", "pool": "default", "path": "/"}} - ExpandedDevices map[string]map[string]string `json:"expanded_devices,omitempty" yaml:"expanded_devices,omitempty"` - - // Instance name - // Example: foo - Name string `json:"name" yaml:"name"` - - // Instance status (see instance_state) - // Example: Running - Status string `json:"status" yaml:"status"` - - // Instance status code (see instance_state) - // Example: 101 - StatusCode StatusCode `json:"status_code" yaml:"status_code"` - - // Last start timestamp - // Example: 2021-03-23T20:00:00-04:00 - LastUsedAt time.Time `json:"last_used_at" yaml:"last_used_at"` - - // What cluster member this instance is located on - // Example: lxd01 - Location string `json:"location" yaml:"location"` - - // The type of instance (container or virtual-machine) - // Example: container - Type string `json:"type" yaml:"type"` - - // Instance project name - // Example: foo - // - // API extension: instance_all_projects - Project string `json:"project" yaml:"project"` -} - -// InstanceFull is a combination of Instance, InstanceBackup, InstanceState and InstanceSnapshot. -// -// swagger:model -// -// API extension: instances. 
-type InstanceFull struct { - Instance `yaml:",inline"` - - // List of backups. - Backups []InstanceBackup `json:"backups" yaml:"backups"` - - // Current state. - State *InstanceState `json:"state" yaml:"state"` - - // List of snapshots. - Snapshots []InstanceSnapshot `json:"snapshots" yaml:"snapshots"` -} - -// Writable converts a full Instance struct into a InstancePut struct (filters read-only fields). -// -// API extension: instances. -func (c *Instance) Writable() InstancePut { - return c.InstancePut -} - -// IsActive checks whether the instance state indicates the instance is active. -// -// API extension: instances. -func (c Instance) IsActive() bool { - switch c.StatusCode { - case Stopped: - return false - case Error: - return false - default: - return true - } -} - -// URL returns the URL for the instance. -func (c *Instance) URL(apiVersion string, project string) *URL { - return NewURL().Path(apiVersion, "instances", c.Name).Project(project) -} - -// InstanceSource represents the creation source for a new instance. -// -// swagger:model -// -// API extension: instances. 
-type InstanceSource struct { - // Source type - // Example: image - Type string `json:"type" yaml:"type"` - - // Certificate (for remote images or migration) - // Example: X509 PEM certificate - Certificate string `json:"certificate" yaml:"certificate"` - - // Image alias name (for image source) - // Example: ubuntu/22.04 - Alias string `json:"alias,omitempty" yaml:"alias,omitempty"` - - // Image fingerprint (for image source) - // Example: ed56997f7c5b48e8d78986d2467a26109be6fb9f2d92e8c7b08eb8b6cec7629a - Fingerprint string `json:"fingerprint,omitempty" yaml:"fingerprint,omitempty"` - - // Image filters (for image source) - // Example: {"os": "Ubuntu", "release": "jammy", "variant": "cloud"} - Properties map[string]string `json:"properties,omitempty" yaml:"properties,omitempty"` - - // Remote server URL (for remote images) - // Example: https://images.linuxcontainers.org - Server string `json:"server,omitempty" yaml:"server,omitempty"` - - // Remote server secret (for remote private images) - // Example: RANDOM-STRING - Secret string `json:"secret,omitempty" yaml:"secret,omitempty"` - - // Protocol name (for remote image) - // Example: simplestreams - Protocol string `json:"protocol,omitempty" yaml:"protocol,omitempty"` - - // Base image fingerprint (for faster migration) - // Example: ed56997f7c5b48e8d78986d2467a26109be6fb9f2d92e8c7b08eb8b6cec7629a - BaseImage string `json:"base-image,omitempty" yaml:"base-image,omitempty"` - - // Whether to use pull or push mode (for migration) - // Example: pull - Mode string `json:"mode,omitempty" yaml:"mode,omitempty"` - - // Remote operation URL (for migration) - // Example: https://1.2.3.4:8443/1.0/operations/1721ae08-b6a8-416a-9614-3f89302466e1 - Operation string `json:"operation,omitempty" yaml:"operation,omitempty"` - - // Map of migration websockets (for migration) - // Example: {"criu": "RANDOM-STRING", "rsync": "RANDOM-STRING"} - Websockets map[string]string `json:"secrets,omitempty" yaml:"secrets,omitempty"` - - // 
Existing instance name or snapshot (for copy) - // Example: foo/snap0 - Source string `json:"source,omitempty" yaml:"source,omitempty"` - - // Whether this is a live migration (for migration) - // Example: false - Live bool `json:"live,omitempty" yaml:"live,omitempty"` - - // Whether the copy should skip the snapshots (for copy) - // Example: false - InstanceOnly bool `json:"instance_only,omitempty" yaml:"instance_only,omitempty"` - - // Whether the copy should skip the snapshots (for copy, deprecated, use instance_only) - // Example: false - ContainerOnly bool `json:"container_only,omitempty" yaml:"container_only,omitempty"` // Deprecated, use InstanceOnly. - - // Whether this is refreshing an existing instance (for migration and copy) - // Example: false - Refresh bool `json:"refresh,omitempty" yaml:"refresh,omitempty"` - - // Source project name (for copy and local image) - // Example: blah - Project string `json:"project,omitempty" yaml:"project,omitempty"` - - // Whether to ignore errors when copying (e.g. for volatile files) - // Example: false - // - // API extension: instance_allow_inconsistent_copy - AllowInconsistent bool `json:"allow_inconsistent" yaml:"allow_inconsistent"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/instance_backup.go b/vendor/github.com/lxc/lxd/shared/api/instance_backup.go deleted file mode 100644 index 5dfa5549..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/instance_backup.go +++ /dev/null @@ -1,80 +0,0 @@ -package api - -import ( - "time" -) - -// InstanceBackupsPost represents the fields available for a new LXD instance backup. -// -// swagger:model -// -// API extension: instances. 
-type InstanceBackupsPost struct { - // Backup name - // Example: backup0 - Name string `json:"name" yaml:"name"` - - // When the backup expires (gets auto-deleted) - // Example: 2021-03-23T17:38:37.753398689-04:00 - ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"` - - // Whether to ignore snapshots - // Example: false - InstanceOnly bool `json:"instance_only" yaml:"instance_only"` - - // Whether to ignore snapshots (deprecated, use instance_only) - // Example: false - ContainerOnly bool `json:"container_only" yaml:"container_only"` // Deprecated, use InstanceOnly. - - // Whether to use a pool-optimized binary format (instead of plain tarball) - // Example: true - OptimizedStorage bool `json:"optimized_storage" yaml:"optimized_storage"` - - // What compression algorithm to use - // Example: gzip - // - // API extension: backup_compression_algorithm - CompressionAlgorithm string `json:"compression_algorithm" yaml:"compression_algorithm"` -} - -// InstanceBackup represents a LXD instance backup. -// -// swagger:model -// -// API extension: instances. -type InstanceBackup struct { - // Backup name - // Example: backup0 - Name string `json:"name" yaml:"name"` - - // When the backup was created - // Example: 2021-03-23T16:38:37.753398689-04:00 - CreatedAt time.Time `json:"created_at" yaml:"created_at"` - - // When the backup expires (gets auto-deleted) - // Example: 2021-03-23T17:38:37.753398689-04:00 - ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"` - - // Whether to ignore snapshots - // Example: false - InstanceOnly bool `json:"instance_only" yaml:"instance_only"` - - // Whether to ignore snapshots (deprecated, use instance_only) - // Example: false - ContainerOnly bool `json:"container_only" yaml:"container_only"` // Deprecated, use InstanceOnly. 
- - // Whether to use a pool-optimized binary format (instead of plain tarball) - // Example: true - OptimizedStorage bool `json:"optimized_storage" yaml:"optimized_storage"` -} - -// InstanceBackupPost represents the fields available for the renaming of a instance backup. -// -// swagger:model -// -// API extension: instances. -type InstanceBackupPost struct { - // New backup name - // Example: backup1 - Name string `json:"name" yaml:"name"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/instance_console.go b/vendor/github.com/lxc/lxd/shared/api/instance_console.go deleted file mode 100644 index 0d74a371..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/instance_console.go +++ /dev/null @@ -1,30 +0,0 @@ -package api - -// InstanceConsoleControl represents a message on the instance console "control" socket. -// -// API extension: instances. -type InstanceConsoleControl struct { - Command string `json:"command" yaml:"command"` - Args map[string]string `json:"args" yaml:"args"` -} - -// InstanceConsolePost represents a LXD instance console request. -// -// swagger:model -// -// API extension: instances. -type InstanceConsolePost struct { - // Console width in columns (console type only) - // Example: 80 - Width int `json:"width" yaml:"width"` - - // Console height in rows (console type only) - // Example: 24 - Height int `json:"height" yaml:"height"` - - // Type of console to attach to (console or vga) - // Example: console - // - // API extension: console_vga_type - Type string `json:"type" yaml:"type"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/instance_exec.go b/vendor/github.com/lxc/lxd/shared/api/instance_exec.go deleted file mode 100644 index 32616f36..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/instance_exec.go +++ /dev/null @@ -1,56 +0,0 @@ -package api - -// InstanceExecControl represents a message on the instance exec "control" socket. -// -// API extension: instances. 
-type InstanceExecControl struct { - Command string `json:"command" yaml:"command"` - Args map[string]string `json:"args" yaml:"args"` - Signal int `json:"signal" yaml:"signal"` -} - -// InstanceExecPost represents a LXD instance exec request. -// -// swagger:model -// -// API extension: instances. -type InstanceExecPost struct { - // Command and its arguments - // Example: ["bash"] - Command []string `json:"command" yaml:"command"` - - // Whether to wait for all websockets to be connected before spawning the command - // Example: true - WaitForWS bool `json:"wait-for-websocket" yaml:"wait-for-websocket"` - - // Whether the command is to be spawned in interactive mode (singled PTY instead of 3 PIPEs) - // Example: true - Interactive bool `json:"interactive" yaml:"interactive"` - - // Additional environment to pass to the command - // Example: {"FOO": "BAR"} - Environment map[string]string `json:"environment" yaml:"environment"` - - // Terminal width in characters (for interactive) - // Example: 80 - Width int `json:"width" yaml:"width"` - - // Terminal height in rows (for interactive) - // Example: 24 - Height int `json:"height" yaml:"height"` - - // Whether to capture the output for later download (requires non-interactive) - RecordOutput bool `json:"record-output" yaml:"record-output"` - - // UID of the user to spawn the command as - // Example: 1000 - User uint32 `json:"user" yaml:"user"` - - // GID of the user to spawn the command as - // Example: 1000 - Group uint32 `json:"group" yaml:"group"` - - // Current working directory for the command - // Example: /home/foo/ - Cwd string `json:"cwd" yaml:"cwd"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/instance_snapshot.go b/vendor/github.com/lxc/lxd/shared/api/instance_snapshot.go deleted file mode 100644 index 284a7692..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/instance_snapshot.go +++ /dev/null @@ -1,126 +0,0 @@ -package api - -import ( - "time" -) - -// InstanceSnapshotsPost represents the 
fields available for a new LXD instance snapshot. -// -// swagger:model -// -// API extension: instances. -type InstanceSnapshotsPost struct { - // Snapshot name - // Example: snap0 - Name string `json:"name" yaml:"name"` - - // Whether the snapshot should include runtime state - // Example: false - Stateful bool `json:"stateful" yaml:"stateful"` - - // When the snapshot expires (gets auto-deleted) - // Example: 2021-03-23T17:38:37.753398689-04:00 - // - // API extension: snapshot_expiry_creation - ExpiresAt *time.Time `json:"expires_at" yaml:"expires_at"` -} - -// InstanceSnapshotPost represents the fields required to rename/move a LXD instance snapshot. -// -// swagger:model -// -// API extension: instances. -type InstanceSnapshotPost struct { - // New name for the snapshot - // Example: foo - Name string `json:"name" yaml:"name"` - - // Whether this is a migration request - // Example: false - Migration bool `json:"migration" yaml:"migration"` - - // Migration target for push migration (requires migration) - Target *InstancePostTarget `json:"target" yaml:"target"` - - // Whether to perform a live migration (requires migration) - // Example: false - Live bool `json:"live,omitempty" yaml:"live,omitempty"` -} - -// InstanceSnapshotPut represents the modifiable fields of a LXD instance snapshot. -// -// swagger:model -// -// API extension: instances. -type InstanceSnapshotPut struct { - // When the snapshot expires (gets auto-deleted) - // Example: 2021-03-23T17:38:37.753398689-04:00 - ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"` -} - -// InstanceSnapshot represents a LXD instance snapshot. -// -// swagger:model -// -// API extension: instances. 
-type InstanceSnapshot struct { - InstanceSnapshotPut `yaml:",inline"` - - // Architecture name - // Example: x86_64 - Architecture string `json:"architecture" yaml:"architecture"` - - // Instance configuration (see doc/instances.md) - // Example: {"security.nesting": "true"} - Config map[string]string `json:"config" yaml:"config"` - - // Instance creation timestamp - // Example: 2021-03-23T20:00:00-04:00 - CreatedAt time.Time `json:"created_at" yaml:"created_at"` - - // Instance devices (see doc/instances.md) - // Example: {"root": {"type": "disk", "pool": "default", "path": "/"}} - Devices map[string]map[string]string `json:"devices" yaml:"devices"` - - // Whether the instance is ephemeral (deleted on shutdown) - // Example: false - Ephemeral bool `json:"ephemeral" yaml:"ephemeral"` - - // Expanded configuration (all profiles and local config merged) - // Example: {"security.nesting": "true"} - ExpandedConfig map[string]string `json:"expanded_config,omitempty" yaml:"expanded_config,omitempty"` - - // Expanded devices (all profiles and local devices merged) - // Example: {"root": {"type": "disk", "pool": "default", "path": "/"}} - ExpandedDevices map[string]map[string]string `json:"expanded_devices,omitempty" yaml:"expanded_devices,omitempty"` - - // Last start timestamp - // Example: 2021-03-23T20:00:00-04:00 - LastUsedAt time.Time `json:"last_used_at" yaml:"last_used_at"` - - // Snapshot name - // Example: foo - Name string `json:"name" yaml:"name"` - - // List of profiles applied to the instance - // Example: ["default"] - Profiles []string `json:"profiles" yaml:"profiles"` - - // Whether the instance currently has saved state on disk - // Example: false - Stateful bool `json:"stateful" yaml:"stateful"` - - // Size of the snapshot in bytes - // Example: 143360 - // - // API extension: snapshot_disk_usage - Size int64 `json:"size" yaml:"size"` -} - -// Writable converts a full InstanceSnapshot struct into a InstanceSnapshotPut struct -// (filters read-only 
fields). -// -// API extension: instances. -func (c *InstanceSnapshot) Writable() InstanceSnapshotPut { - return c.InstanceSnapshotPut -} diff --git a/vendor/github.com/lxc/lxd/shared/api/instance_state.go b/vendor/github.com/lxc/lxd/shared/api/instance_state.go deleted file mode 100644 index f1ce0f48..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/instance_state.go +++ /dev/null @@ -1,201 +0,0 @@ -package api - -// InstanceStatePut represents the modifiable fields of a LXD instance's state. -// -// swagger:model -// -// API extension: instances. -type InstanceStatePut struct { - // State change action (start, stop, restart, freeze, unfreeze) - // Example: start - Action string `json:"action" yaml:"action"` - - // How long to wait (in s) before giving up (when force isn't set) - // Example: 30 - Timeout int `json:"timeout" yaml:"timeout"` - - // Whether to force the action (for stop and restart) - // Example: false - Force bool `json:"force" yaml:"force"` - - // Whether to store the runtime state (for stop) - // Example: false - Stateful bool `json:"stateful" yaml:"stateful"` -} - -// InstanceState represents a LXD instance's state. -// -// swagger:model -// -// API extension: instances. 
-type InstanceState struct { - // Current status (Running, Stopped, Frozen or Error) - // Example: Running - Status string `json:"status" yaml:"status"` - - // Numeric status code (101, 102, 110, 112) - // Example: 101 - StatusCode StatusCode `json:"status_code" yaml:"status_code"` - - // Dict of disk usage - Disk map[string]InstanceStateDisk `json:"disk" yaml:"disk"` - - // Memory usage information - Memory InstanceStateMemory `json:"memory" yaml:"memory"` - - // Dict of network usage - Network map[string]InstanceStateNetwork `json:"network" yaml:"network"` - - // PID of the runtime - // Example: 7281 - Pid int64 `json:"pid" yaml:"pid"` - - // Number of processes in the instance - // Example: 50 - Processes int64 `json:"processes" yaml:"processes"` - - // CPU usage information - CPU InstanceStateCPU `json:"cpu" yaml:"cpu"` -} - -// InstanceStateDisk represents the disk information section of a LXD instance's state. -// -// swagger:model -// -// API extension: instances. -type InstanceStateDisk struct { - // Disk usage in bytes - // Example: 502239232 - Usage int64 `json:"usage" yaml:"usage"` -} - -// InstanceStateCPU represents the cpu information section of a LXD instance's state. -// -// swagger:model -// -// API extension: instances. -type InstanceStateCPU struct { - // CPU usage in nanoseconds - // Example: 3637691016 - Usage int64 `json:"usage" yaml:"usage"` -} - -// InstanceStateMemory represents the memory information section of a LXD instance's state. -// -// swagger:model -// -// API extension: instances. 
-type InstanceStateMemory struct { - // Memory usage in bytes - // Example: 73248768 - Usage int64 `json:"usage" yaml:"usage"` - - // Peak memory usage in bytes - // Example: 73785344 - UsagePeak int64 `json:"usage_peak" yaml:"usage_peak"` - - // SWAP usage in bytes - // Example: 12297557 - SwapUsage int64 `json:"swap_usage" yaml:"swap_usage"` - - // Peak SWAP usage in bytes - // Example: 12297557 - SwapUsagePeak int64 `json:"swap_usage_peak" yaml:"swap_usage_peak"` -} - -// InstanceStateNetwork represents the network information section of a LXD instance's state. -// -// swagger:model -// -// API extension: instances. -type InstanceStateNetwork struct { - // List of IP addresses - Addresses []InstanceStateNetworkAddress `json:"addresses" yaml:"addresses"` - - // Traffic counters - Counters InstanceStateNetworkCounters `json:"counters" yaml:"counters"` - - // MAC address - // Example: 00:16:3e:0c:ee:dd - Hwaddr string `json:"hwaddr" yaml:"hwaddr"` - - // Name of the interface on the host - // Example: vethbbcd39c7 - HostName string `json:"host_name" yaml:"host_name"` - - // MTU (maximum transmit unit) for the interface - // Example: 1500 - Mtu int `json:"mtu" yaml:"mtu"` - - // Administrative state of the interface (up/down) - // Example: up - State string `json:"state" yaml:"state"` - - // Type of interface (broadcast, loopback, point-to-point, ...) - // Example: broadcast - Type string `json:"type" yaml:"type"` -} - -// InstanceStateNetworkAddress represents a network address as part of the network section of a LXD -// instance's state. -// -// swagger:model -// -// API extension: instances. 
-type InstanceStateNetworkAddress struct { - // Network family (inet or inet6) - // Example: inet6 - Family string `json:"family" yaml:"family"` - - // IP address - // Example: fd42:4c81:5770:1eaf:216:3eff:fe0c:eedd - Address string `json:"address" yaml:"address"` - - // Network mask - // Example: 64 - Netmask string `json:"netmask" yaml:"netmask"` - - // Address scope (local, link or global) - // Example: global - Scope string `json:"scope" yaml:"scope"` -} - -// InstanceStateNetworkCounters represents packet counters as part of the network section of a LXD -// instance's state. -// -// swagger:model -// -// API extension: instances. -type InstanceStateNetworkCounters struct { - // Number of bytes received - // Example: 192021 - BytesReceived int64 `json:"bytes_received" yaml:"bytes_received"` - - // Number of bytes sent - // Example: 10888579 - BytesSent int64 `json:"bytes_sent" yaml:"bytes_sent"` - - // Number of packets received - // Example: 1748 - PacketsReceived int64 `json:"packets_received" yaml:"packets_received"` - - // Number of packets sent - // Example: 964 - PacketsSent int64 `json:"packets_sent" yaml:"packets_sent"` - - // Number of errors received - // Example: 14 - ErrorsReceived int64 `json:"errors_received" yaml:"errors_received"` - - // Number of errors sent - // Example: 41 - ErrorsSent int64 `json:"errors_sent" yaml:"errors_sent"` - - // Number of outbound packets dropped - // Example: 541 - PacketsDroppedOutbound int64 `json:"packets_dropped_outbound" yaml:"packets_dropped_outbound"` - - // Number of inbound packets dropped - // Example: 179 - PacketsDroppedInbound int64 `json:"packets_dropped_inbound" yaml:"packets_dropped_inbound"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/migration.go b/vendor/github.com/lxc/lxd/shared/api/migration.go deleted file mode 100644 index ae132a6d..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/migration.go +++ /dev/null @@ -1,10 +0,0 @@ -package api - -// SecretNameControl is the secret name 
used for the migration control connection. -const SecretNameControl = "control" - -// SecretNameFilesystem is the secret name used for the migration filesystem connection. -const SecretNameFilesystem = "fs" - -// SecretNameState is the secret name used for the migration state connection. -const SecretNameState = "criu" // Legacy value used for backward compatibility for clients. diff --git a/vendor/github.com/lxc/lxd/shared/api/network.go b/vendor/github.com/lxc/lxd/shared/api/network.go deleted file mode 100644 index bfe1eed2..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/network.go +++ /dev/null @@ -1,318 +0,0 @@ -package api - -// NetworksPost represents the fields of a new LXD network -// -// swagger:model -// -// API extension: network. -type NetworksPost struct { - NetworkPut `yaml:",inline"` - - // The name of the new network - // Example: lxdbr1 - Name string `json:"name" yaml:"name"` - - // The network type (refer to doc/networks.md) - // Example: bridge - Type string `json:"type" yaml:"type"` -} - -// NetworkPost represents the fields required to rename a LXD network -// -// swagger:model -// -// API extension: network. -type NetworkPost struct { - // The new name for the network - // Example: lxdbr1 - Name string `json:"name" yaml:"name"` -} - -// NetworkPut represents the modifiable fields of a LXD network -// -// swagger:model -// -// API extension: network. -type NetworkPut struct { - // Network configuration map (refer to doc/networks.md) - // Example: {"ipv4.address": "10.0.0.1/24", "ipv4.nat": "true", "ipv6.address": "none"} - Config map[string]string `json:"config" yaml:"config"` - - // Description of the profile - // Example: My new LXD bridge - // - // API extension: entity_description - Description string `json:"description" yaml:"description"` -} - -// NetworkStatusPending network is pending creation on other cluster nodes. -const NetworkStatusPending = "Pending" - -// NetworkStatusCreated network is fully created. 
-const NetworkStatusCreated = "Created" - -// NetworkStatusErrored network is in error status. -const NetworkStatusErrored = "Errored" - -// NetworkStatusUnknown network is in unknown status. -const NetworkStatusUnknown = "Unknown" - -// NetworkStatusUnavailable network failed to initialize. -const NetworkStatusUnavailable = "Unavailable" - -// Network represents a LXD network -// -// swagger:model -type Network struct { - NetworkPut `yaml:",inline"` - - // The network name - // Read only: true - // Example: lxdbr0 - Name string `json:"name" yaml:"name"` - - // The network type - // Read only: true - // Example: bridge - Type string `json:"type" yaml:"type"` - - // List of URLs of objects using this profile - // Read only: true - // Example: ["/1.0/profiles/default", "/1.0/instances/c1"] - UsedBy []string `json:"used_by" yaml:"used_by"` - - // Whether this is a LXD managed network - // Read only: true - // Example: true - // - // API extension: network - Managed bool `json:"managed" yaml:"managed"` - - // The state of the network (for managed network in clusters) - // Read only: true - // Example: Created - // - // API extension: clustering - Status string `json:"status" yaml:"status"` - - // Cluster members on which the network has been defined - // Read only: true - // Example: ["lxd01", "lxd02", "lxd03"] - // - // API extension: clustering - Locations []string `json:"locations" yaml:"locations"` -} - -// Writable converts a full Network struct into a NetworkPut struct (filters read-only fields). -func (network *Network) Writable() NetworkPut { - return network.NetworkPut -} - -// NetworkLease represents a DHCP lease -// -// swagger:model -// -// API extension: network_leases. 
-type NetworkLease struct { - // The hostname associated with the record - // Example: c1 - Hostname string `json:"hostname" yaml:"hostname"` - - // The MAC address - // Example: 00:16:3e:2c:89:d9 - Hwaddr string `json:"hwaddr" yaml:"hwaddr"` - - // The IP address - // Example: 10.0.0.98 - Address string `json:"address" yaml:"address"` - - // The type of record (static or dynamic) - // Example: dynamic - Type string `json:"type" yaml:"type"` - - // What cluster member this record was found on - // Example: lxd01 - // - // API extension: network_leases_location - Location string `json:"location" yaml:"location"` -} - -// NetworkState represents the network state -// -// swagger:model -type NetworkState struct { - // List of addresses - Addresses []NetworkStateAddress `json:"addresses" yaml:"addresses"` - - // Interface counters - Counters NetworkStateCounters `json:"counters" yaml:"counters"` - - // MAC address - // Example: 00:16:3e:5a:83:57 - Hwaddr string `json:"hwaddr" yaml:"hwaddr"` - - // MTU - // Example: 1500 - Mtu int `json:"mtu" yaml:"mtu"` - - // Link state - // Example: up - State string `json:"state" yaml:"state"` - - // Interface type - // Example: broadcast - Type string `json:"type" yaml:"type"` - - // Additional bond interface information - // - // API extension: network_state_bond_bridge - Bond *NetworkStateBond `json:"bond" yaml:"bond"` - - // Additional bridge interface information - // - // API extension: network_state_bond_bridge - Bridge *NetworkStateBridge `json:"bridge" yaml:"bridge"` - - // Additional vlan interface information - // - // API extension: network_state_vlan - VLAN *NetworkStateVLAN `json:"vlan" yaml:"vlan"` - - // Additional OVN network information - // - // API extension: network_state_ovn - OVN *NetworkStateOVN `json:"ovn" yaml:"ovn"` -} - -// NetworkStateAddress represents a network address -// -// swagger:model -type NetworkStateAddress struct { - // Address family - // Example: inet - Family string `json:"family" 
yaml:"family"` - - // IP address - // Example: 10.0.0.1 - Address string `json:"address" yaml:"address"` - - // IP netmask (CIDR) - // Example: 24 - Netmask string `json:"netmask" yaml:"netmask"` - - // Address scope - // Example: global - Scope string `json:"scope" yaml:"scope"` -} - -// NetworkStateCounters represents packet counters -// -// swagger:model -type NetworkStateCounters struct { - // Number of bytes received - // Example: 250542118 - BytesReceived int64 `json:"bytes_received" yaml:"bytes_received"` - - // Number of bytes sent - // Example: 17524040140 - BytesSent int64 `json:"bytes_sent" yaml:"bytes_sent"` - - // Number of packets received - // Example: 1182515 - PacketsReceived int64 `json:"packets_received" yaml:"packets_received"` - - // Number of packets sent - // Example: 1567934 - PacketsSent int64 `json:"packets_sent" yaml:"packets_sent"` -} - -// NetworkStateBond represents bond specific state -// -// swagger:model -// -// API extension: network_state_bond_bridge. -type NetworkStateBond struct { - // Bonding mode - // Example: 802.3ad - Mode string `json:"mode" yaml:"mode"` - - // Transmit balancing policy - // Example: layer3+4 - TransmitPolicy string `json:"transmit_policy" yaml:"transmit_policy"` - - // Delay on link up (ms) - // Example: 0 - UpDelay uint64 `json:"up_delay" yaml:"up_delay"` - - // Delay on link down (ms) - // Example: 0 - DownDelay uint64 `json:"down_delay" yaml:"down_delay"` - - // How often to check for link state (ms) - // Example: 100 - MIIFrequency uint64 `json:"mii_frequency" yaml:"mii_frequency"` - - // Bond link state - // Example: up - MIIState string `json:"mii_state" yaml:"mii_state"` - - // List of devices that are part of the bond - // Example: ["eth0", "eth1"] - LowerDevices []string `json:"lower_devices" yaml:"lower_devices"` -} - -// NetworkStateBridge represents bridge specific state -// -// swagger:model -// -// API extension: network_state_bond_bridge. 
-type NetworkStateBridge struct { - // Bridge ID - // Example: 8000.0a0f7c6edbd9 - ID string `json:"id" yaml:"id"` - - // Whether STP is enabled - // Example: false - STP bool `json:"stp" yaml:"stp"` - - // Delay on port join (ms) - // Example: 1500 - ForwardDelay uint64 `json:"forward_delay" yaml:"forward_delay"` - - // Default VLAN ID - // Example: 1 - VLANDefault uint64 `json:"vlan_default" yaml:"vlan_default"` - - // Whether VLAN filtering is enabled - // Example: false - VLANFiltering bool `json:"vlan_filtering" yaml:"vlan_filtering"` - - // List of devices that are in the bridge - // Example: ["eth0", "eth1"] - UpperDevices []string `json:"upper_devices" yaml:"upper_devices"` -} - -// NetworkStateVLAN represents VLAN specific state -// -// swagger:model -// -// API extension: network_state_vlan. -type NetworkStateVLAN struct { - // Parent device - // Example: eth0 - LowerDevice string `json:"lower_device" yaml:"lower_device"` - - // VLAN ID - // Example: 100 - VID uint64 `json:"vid" yaml:"vid"` -} - -// NetworkStateOVN represents OVN specific state -// -// swagger:model -// -// API extension: network_state_ovn. -type NetworkStateOVN struct { - // OVN network chassis name - Chassis string `json:"chassis" yaml:"chassis"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/network_acl.go b/vendor/github.com/lxc/lxd/shared/api/network_acl.go deleted file mode 100644 index 842d4882..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/network_acl.go +++ /dev/null @@ -1,157 +0,0 @@ -package api - -import ( - "strings" -) - -// NetworkACLRule represents a single rule in an ACL ruleset. -// Refer to doc/network-acls.md for details. -// -// swagger:model -// -// API extension: network_acl. 
-type NetworkACLRule struct { - // Action to perform on rule match - // Example: allow - Action string `json:"action" yaml:"action"` - - // Source address - // Example: @internal - Source string `json:"source,omitempty" yaml:"source,omitempty"` - - // Destination address - // Example: 8.8.8.8/32,8.8.4.4/32 - Destination string `json:"destination,omitempty" yaml:"destination,omitempty"` - - // Protocol - // Example: udp - Protocol string `json:"protocol,omitempty" yaml:"protocol,omitempty"` - - // Source port - // Example: 1234 - SourcePort string `json:"source_port,omitempty" yaml:"source_port,omitempty"` - - // Destination port - // Example: 53 - DestinationPort string `json:"destination_port,omitempty" yaml:"destination_port,omitempty"` - - // Type of ICMP message (for ICMP protocol) - // Example: 8 - ICMPType string `json:"icmp_type,omitempty" yaml:"icmp_type,omitempty"` - - // ICMP message code (for ICMP protocol) - // Example: 0 - ICMPCode string `json:"icmp_code,omitempty" yaml:"icmp_code,omitempty"` - - // Description of the rule - // Example: Allow DNS queries to Google DNS - Description string `json:"description,omitempty" yaml:"description,omitempty"` - - // State of the rule - // Example: enabled - State string `json:"state" yaml:"state"` -} - -// Normalise normalises the fields in the rule so that they are comparable with ones stored. -func (r *NetworkACLRule) Normalise() { - r.Action = strings.TrimSpace(r.Action) - r.Protocol = strings.TrimSpace(r.Protocol) - r.ICMPType = strings.TrimSpace(r.ICMPType) - r.ICMPCode = strings.TrimSpace(r.ICMPCode) - r.Description = strings.TrimSpace(r.Description) - r.State = strings.TrimSpace(r.State) - - // Remove space from Source subject list. - subjects := strings.Split(r.Source, ",") - for i, s := range subjects { - subjects[i] = strings.TrimSpace(s) - } - - r.Source = strings.Join(subjects, ",") - - // Remove space from Destination subject list. 
- subjects = strings.Split(r.Destination, ",") - for i, s := range subjects { - subjects[i] = strings.TrimSpace(s) - } - - r.Destination = strings.Join(subjects, ",") - - // Remove space from SourcePort port list. - ports := strings.Split(r.SourcePort, ",") - for i, s := range ports { - ports[i] = strings.TrimSpace(s) - } - - r.SourcePort = strings.Join(ports, ",") - - // Remove space from DestinationPort port list. - ports = strings.Split(r.DestinationPort, ",") - for i, s := range ports { - ports[i] = strings.TrimSpace(s) - } - - r.DestinationPort = strings.Join(ports, ",") -} - -// NetworkACLPost used for renaming an ACL. -// -// swagger:model -// -// API extension: network_acl. -type NetworkACLPost struct { - // The new name for the ACL - // Example: bar - Name string `json:"name" yaml:"name"` // Name of ACL. -} - -// NetworkACLPut used for updating an ACL. -// -// swagger:model -// -// API extension: network_acl. -type NetworkACLPut struct { - // Description of the ACL - // Example: Web servers - Description string `json:"description" yaml:"description"` - - // List of egress rules (order independent) - Egress []NetworkACLRule `json:"egress" yaml:"egress"` - - // List of ingress rules (order independent) - Ingress []NetworkACLRule `json:"ingress" yaml:"ingress"` - - // ACL configuration map (refer to doc/network-acls.md) - // Example: {"user.mykey": "foo"} - Config map[string]string `json:"config" yaml:"config"` -} - -// NetworkACL used for displaying an ACL. -// -// swagger:model -// -// API extension: network_acl. -type NetworkACL struct { - NetworkACLPost `yaml:",inline"` - NetworkACLPut `yaml:",inline"` - - // List of URLs of objects using this profile - // Read only: true - // Example: ["/1.0/instances/c1", "/1.0/instances/v1", "/1.0/networks/lxdbr0"] - UsedBy []string `json:"used_by" yaml:"used_by"` // Resources that use the ACL. -} - -// Writable converts a full NetworkACL struct into a NetworkACLPut struct (filters read-only fields). 
-func (acl *NetworkACL) Writable() NetworkACLPut { - return acl.NetworkACLPut -} - -// NetworkACLsPost used for creating an ACL. -// -// swagger:model -// -// API extension: network_acl. -type NetworkACLsPost struct { - NetworkACLPost `yaml:",inline"` - NetworkACLPut `yaml:",inline"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/network_forward.go b/vendor/github.com/lxc/lxd/shared/api/network_forward.go deleted file mode 100644 index 3ecca813..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/network_forward.go +++ /dev/null @@ -1,143 +0,0 @@ -package api - -import ( - "net" - "strings" -) - -// NetworkForwardPort represents a port specification in a network address forward -// -// swagger:model -// -// API extension: network_forward. -type NetworkForwardPort struct { - // Description of the forward port - // Example: My web server forward - Description string `json:"description" yaml:"description"` - - // Protocol for port forward (either tcp or udp) - // Example: tcp - Protocol string `json:"protocol" yaml:"protocol"` - - // ListenPort(s) to forward (comma delimited ranges) - // Example: 80,81,8080-8090 - ListenPort string `json:"listen_port" yaml:"listen_port"` - - // TargetPort(s) to forward ListenPorts to (allows for many-to-one) - // Example: 80,81,8080-8090 - TargetPort string `json:"target_port" yaml:"target_port"` - - // TargetAddress to forward ListenPorts to - // Example: 198.51.100.2 - TargetAddress string `json:"target_address" yaml:"target_address"` -} - -// Normalise normalises the fields in the rule so that they are comparable with ones stored. -func (p *NetworkForwardPort) Normalise() { - p.Description = strings.TrimSpace(p.Description) - p.Protocol = strings.TrimSpace(p.Protocol) - p.TargetAddress = strings.TrimSpace(p.TargetAddress) - - ip := net.ParseIP(p.TargetAddress) - if ip != nil { - p.TargetAddress = ip.String() // Replace with canonical form if specified. - } - - // Remove space from ListenPort list. 
- subjects := strings.Split(p.ListenPort, ",") - for i, s := range subjects { - subjects[i] = strings.TrimSpace(s) - } - - p.ListenPort = strings.Join(subjects, ",") - - // Remove space from TargetPort list. - subjects = strings.Split(p.TargetPort, ",") - for i, s := range subjects { - subjects[i] = strings.TrimSpace(s) - } - - p.TargetPort = strings.Join(subjects, ",") -} - -// NetworkForwardsPost represents the fields of a new LXD network address forward -// -// swagger:model -// -// API extension: network_forward. -type NetworkForwardsPost struct { - NetworkForwardPut `yaml:",inline"` - - // The listen address of the forward - // Example: 192.0.2.1 - ListenAddress string `json:"listen_address" yaml:"listen_address"` -} - -// Normalise normalises the fields in the rule so that they are comparable with ones stored. -func (f *NetworkForwardsPost) Normalise() { - ip := net.ParseIP(f.ListenAddress) - if ip != nil { - f.ListenAddress = ip.String() // Replace with canonical form if specified. - } - - f.NetworkForwardPut.Normalise() -} - -// NetworkForwardPut represents the modifiable fields of a LXD network address forward -// -// swagger:model -// -// API extension: network_forward. -type NetworkForwardPut struct { - // Description of the forward listen IP - // Example: My public IP forward - Description string `json:"description" yaml:"description"` - - // Forward configuration map (refer to doc/network-forwards.md) - // Example: {"user.mykey": "foo"} - Config map[string]string `json:"config" yaml:"config"` - - // Port forwards (optional) - Ports []NetworkForwardPort `json:"ports" yaml:"ports"` -} - -// Normalise normalises the fields in the rule so that they are comparable with ones stored. -func (f *NetworkForwardPut) Normalise() { - f.Description = strings.TrimSpace(f.Description) - - ip := net.ParseIP(f.Config["target_address"]) - if ip != nil { - f.Config["target_address"] = ip.String() // Replace with canonical form if specified. 
- } - - for i := range f.Ports { - f.Ports[i].Normalise() - } -} - -// NetworkForward used for displaying an network address forward. -// -// swagger:model -// -// API extension: network_forward. -type NetworkForward struct { - NetworkForwardPut `yaml:",inline"` - - // The listen address of the forward - // Example: 192.0.2.1 - ListenAddress string `json:"listen_address" yaml:"listen_address"` - - // What cluster member this record was found on - // Example: lxd01 - Location string `json:"location" yaml:"location"` -} - -// Etag returns the values used for etag generation. -func (f *NetworkForward) Etag() []any { - return []any{f.ListenAddress, f.Description, f.Config, f.Ports} -} - -// Writable converts a full NetworkForward struct into a NetworkForwardPut struct (filters read-only fields). -func (f *NetworkForward) Writable() NetworkForwardPut { - return f.NetworkForwardPut -} diff --git a/vendor/github.com/lxc/lxd/shared/api/network_load_balancer.go b/vendor/github.com/lxc/lxd/shared/api/network_load_balancer.go deleted file mode 100644 index e54306f1..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/network_load_balancer.go +++ /dev/null @@ -1,159 +0,0 @@ -package api - -import ( - "net" - "strings" -) - -// NetworkLoadBalancerBackend represents a target backend specification in a network load balancer -// -// swagger:model -// -// API extension: network_load_balancer. 
-type NetworkLoadBalancerBackend struct { - // Name of the load balancer backend - // Example: c1-http - Name string `json:"name" yaml:"name"` - - // Description of the load balancer backend - // Example: C1 webserver - Description string `json:"description" yaml:"description"` - - // TargetPort(s) to forward ListenPorts to (allows for many-to-one) - // Example: 80,81,8080-8090 - TargetPort string `json:"target_port" yaml:"target_port"` - - // TargetAddress to forward ListenPorts to - // Example: 198.51.100.2 - TargetAddress string `json:"target_address" yaml:"target_address"` -} - -// Normalise normalises the fields in the load balancer backend so that they are comparable with ones stored. -func (p *NetworkLoadBalancerBackend) Normalise() { - p.Description = strings.TrimSpace(p.Description) - p.TargetAddress = strings.TrimSpace(p.TargetAddress) - - ip := net.ParseIP(p.TargetAddress) - if ip != nil { - p.TargetAddress = ip.String() // Replace with canonical form if specified. - } - - // Remove space from TargetPort list. - subjects := strings.Split(p.TargetPort, ",") - for i, s := range subjects { - subjects[i] = strings.TrimSpace(s) - } - - p.TargetPort = strings.Join(subjects, ",") -} - -// NetworkLoadBalancerPort represents a port specification in a network load balancer -// -// swagger:model -// -// API extension: network_load_balancer. 
-type NetworkLoadBalancerPort struct { - // Description of the load balancer port - // Example: My web server load balancer - Description string `json:"description" yaml:"description"` - - // Protocol for load balancer port (either tcp or udp) - // Example: tcp - Protocol string `json:"protocol" yaml:"protocol"` - - // ListenPort(s) of load balancer (comma delimited ranges) - // Example: 80,81,8080-8090 - ListenPort string `json:"listen_port" yaml:"listen_port"` - - // TargetBackend backend names to load balance ListenPorts to - // Example: ["c1-http","c2-http"] - TargetBackend []string `json:"target_backend" yaml:"target_backend"` -} - -// Normalise normalises the fields in the load balancer port so that they are comparable with ones stored. -func (p *NetworkLoadBalancerPort) Normalise() { - p.Description = strings.TrimSpace(p.Description) - p.Protocol = strings.TrimSpace(p.Protocol) - - // Remove space from ListenPort list. - subjects := strings.Split(p.ListenPort, ",") - for i, s := range subjects { - subjects[i] = strings.TrimSpace(s) - } - - p.ListenPort = strings.Join(subjects, ",") -} - -// NetworkLoadBalancersPost represents the fields of a new LXD network load balancer -// -// swagger:model -// -// API extension: network_load_balancer. -type NetworkLoadBalancersPost struct { - NetworkLoadBalancerPut `yaml:",inline"` - - // The listen address of the load balancer - // Example: 192.0.2.1 - ListenAddress string `json:"listen_address" yaml:"listen_address"` -} - -// NetworkLoadBalancerPut represents the modifiable fields of a LXD network load balancer -// -// swagger:model -// -// API extension: network_load_balancer. 
-type NetworkLoadBalancerPut struct { - // Description of the load balancer listen IP - // Example: My public IP load balancer - Description string `json:"description" yaml:"description"` - - // Load balancer configuration map (refer to doc/network-load-balancers.md) - // Example: {"user.mykey": "foo"} - Config map[string]string `json:"config" yaml:"config"` - - // Backends (optional) - Backends []NetworkLoadBalancerBackend `json:"backends" yaml:"backends"` - - // Port forwards (optional) - Ports []NetworkLoadBalancerPort `json:"ports" yaml:"ports"` -} - -// Normalise normalises the fields in the load balancer so that they are comparable with ones stored. -func (f *NetworkLoadBalancerPut) Normalise() { - f.Description = strings.TrimSpace(f.Description) - - for i := range f.Backends { - f.Backends[i].Normalise() - } - - for i := range f.Ports { - f.Ports[i].Normalise() - } -} - -// NetworkLoadBalancer used for displaying a network load balancer -// -// swagger:model -// -// API extension: network_load_balancer. -type NetworkLoadBalancer struct { - NetworkLoadBalancerPut `yaml:",inline"` - - // The listen address of the load balancer - // Example: 192.0.2.1 - ListenAddress string `json:"listen_address" yaml:"listen_address"` - - // What cluster member this record was found on - // Example: lxd01 - Location string `json:"location" yaml:"location"` -} - -// Etag returns the values used for etag generation. -func (f *NetworkLoadBalancer) Etag() []any { - return []any{f.ListenAddress, f.Description, f.Config, f.Backends, f.Ports} -} - -// Writable converts a full NetworkLoadBalancer struct into a NetworkLoadBalancerPut struct (filters read-only fields). 
-func (f *NetworkLoadBalancer) Writable() NetworkLoadBalancerPut { - return f.NetworkLoadBalancerPut -} diff --git a/vendor/github.com/lxc/lxd/shared/api/network_peer.go b/vendor/github.com/lxc/lxd/shared/api/network_peer.go deleted file mode 100644 index b6bc2635..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/network_peer.go +++ /dev/null @@ -1,81 +0,0 @@ -package api - -// NetworkPeersPost represents the fields of a new LXD network peering -// -// swagger:model -// -// API extension: network_peer. -type NetworkPeersPost struct { - NetworkPeerPut `yaml:",inline"` - - // Name of the peer - // Example: project1-network1 - Name string `json:"name" yaml:"name"` - - // Name of the target project - // Example: project1 - TargetProject string `json:"target_project" yaml:"target_project"` - - // Name of the target network - // Example: network1 - TargetNetwork string `json:"target_network" yaml:"target_network"` -} - -// NetworkPeerPut represents the modifiable fields of a LXD network peering -// -// swagger:model -// -// API extension: network_peer. -type NetworkPeerPut struct { - // Description of the peer - // Example: Peering with network1 in project1 - Description string `json:"description" yaml:"description"` - - // Peer configuration map (refer to doc/network-peers.md) - // Example: {"user.mykey": "foo"} - Config map[string]string `json:"config" yaml:"config"` -} - -// NetworkPeer used for displaying a LXD network peering. -// -// swagger:model -// -// API extension: network_forward. 
-type NetworkPeer struct { - NetworkPeerPut `yaml:",inline"` - - // Name of the peer - // Read only: true - // Example: project1-network1 - Name string `json:"name" yaml:"name"` - - // Name of the target project - // Read only: true - // Example: project1 - TargetProject string `json:"target_project" yaml:"target_project"` - - // Name of the target network - // Read only: true - // Example: network1 - TargetNetwork string `json:"target_network" yaml:"target_network"` - - // The state of the peering - // Read only: true - // Example: Pending - Status string `json:"status" yaml:"status"` - - // List of URLs of objects using this network peering - // Read only: true - // Example: ["/1.0/network-acls/test", "/1.0/network-acls/foo"] - UsedBy []string `json:"used_by" yaml:"used_by"` -} - -// Etag returns the values used for etag generation. -func (p *NetworkPeer) Etag() []any { - return []any{p.Name, p.Description, p.Config} -} - -// Writable converts a full NetworkPeer struct into a NetworkPeerPut struct (filters read-only fields). -func (p *NetworkPeer) Writable() NetworkPeerPut { - return p.NetworkPeerPut -} diff --git a/vendor/github.com/lxc/lxd/shared/api/network_zone.go b/vendor/github.com/lxc/lxd/shared/api/network_zone.go deleted file mode 100644 index 73407343..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/network_zone.go +++ /dev/null @@ -1,120 +0,0 @@ -package api - -// NetworkZonesPost represents the fields of a new LXD network zone -// -// swagger:model -// -// API extension: network_dns. -type NetworkZonesPost struct { - NetworkZonePut `yaml:",inline"` - - // The name of the zone (DNS domain name) - // Example: example.net - Name string `json:"name" yaml:"name"` -} - -// NetworkZonePut represents the modifiable fields of a LXD network zone -// -// swagger:model -// -// API extension: network_dns. 
-type NetworkZonePut struct { - // Description of the network zone - // Example: Internal domain - Description string `json:"description" yaml:"description"` - - // Zone configuration map (refer to doc/network-zones.md) - // Example: {"user.mykey": "foo"} - Config map[string]string `json:"config" yaml:"config"` -} - -// NetworkZone represents a network zone (DNS). -// -// swagger:model -// -// API extension: network_dns. -type NetworkZone struct { - NetworkZonePut `yaml:",inline"` - - // The name of the zone (DNS domain name) - // Example: example.net - Name string `json:"name" yaml:"name"` - - // List of URLs of objects using this network zone - // Read only: true - // Example: ["/1.0/networks/foo", "/1.0/networks/bar"] - UsedBy []string `json:"used_by" yaml:"used_by"` // Resources that use the zone. -} - -// Writable converts a full NetworkZone struct into a NetworkZonePut struct (filters read-only fields). -func (f *NetworkZone) Writable() NetworkZonePut { - return f.NetworkZonePut -} - -// NetworkZoneRecordsPost represents the fields of a new LXD network zone record -// -// swagger:model -// -// API extension: network_dns_records. -type NetworkZoneRecordsPost struct { - NetworkZoneRecordPut `yaml:",inline"` - - // The record name in the zone - // Example: @ - Name string `json:"name" yaml:"name"` -} - -// NetworkZoneRecordPut represents the modifiable fields of a LXD network zone record -// -// swagger:model -// -// API extension: network_dns_records. 
-type NetworkZoneRecordPut struct { - // Description of the record - // Example: SPF record - Description string `json:"description" yaml:"description"` - - // Entries in the record - Entries []NetworkZoneRecordEntry `json:"entries" yaml:"entries"` - - // Advanced configuration for the record - // Example: {"user.mykey": "foo"} - Config map[string]string `json:"config" yaml:"config"` -} - -// NetworkZoneRecordEntry represents the fields in a record entry -// -// swagger:model -// -// API extension: network_dns_records. -type NetworkZoneRecordEntry struct { - // Type of DNS entry - // Example: TXT - Type string `json:"type" yaml:"type"` - - // TTL for the entry - // Example: 3600 - TTL uint64 `json:"ttl,omitempty" yaml:"ttl,omitempty"` - - // Value for the record - // Example: v=spf1 mx ~all - Value string `json:"value" yaml:"value"` -} - -// NetworkZoneRecord represents a network zone (DNS) record. -// -// swagger:model -// -// API extension: network_dns_records. -type NetworkZoneRecord struct { - NetworkZoneRecordPut `yaml:",inline"` - - // The name of the record - // Example: @ - Name string `json:"name" yaml:"name"` -} - -// Writable converts a full NetworkZoneRecord struct into a NetworkZoneRecordPut struct (filters read-only fields). -func (f *NetworkZoneRecord) Writable() NetworkZoneRecordPut { - return f.NetworkZoneRecordPut -} diff --git a/vendor/github.com/lxc/lxd/shared/api/operation.go b/vendor/github.com/lxc/lxd/shared/api/operation.go deleted file mode 100644 index df5ac6b2..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/operation.go +++ /dev/null @@ -1,178 +0,0 @@ -package api - -import ( - "fmt" - "time" -) - -// OperationClassTask represents the Task OperationClass. -const OperationClassTask = "task" - -// OperationClassWebsocket represents the Websocket OperationClass. -const OperationClassWebsocket = "websocket" - -// OperationClassToken represents the Token OperationClass. 
-const OperationClassToken = "token" - -// Operation represents a LXD background operation -// -// swagger:model -type Operation struct { - // UUID of the operation - // Example: 6916c8a6-9b7d-4abd-90b3-aedfec7ec7da - ID string `json:"id" yaml:"id"` - - // Type of operation (task, token or websocket) - // Example: websocket - Class string `json:"class" yaml:"class"` - - // Description of the operation - // Example: Executing command - Description string `json:"description" yaml:"description"` - - // Operation creation time - // Example: 2021-03-23T17:38:37.753398689-04:00 - CreatedAt time.Time `json:"created_at" yaml:"created_at"` - - // Operation last change - // Example: 2021-03-23T17:38:37.753398689-04:00 - UpdatedAt time.Time `json:"updated_at" yaml:"updated_at"` - - // Status name - // Example: Running - Status string `json:"status" yaml:"status"` - - // Status code - // Example: 103 - StatusCode StatusCode `json:"status_code" yaml:"status_code"` - - // Affected resourcs - // Example: {"containers": ["/1.0/containers/foo"], "instances": ["/1.0/instances/foo"]} - Resources map[string][]string `json:"resources" yaml:"resources"` - - // Operation specific metadata - // Example: {"command": ["bash"], "environment": {"HOME": "/root", "LANG": "C.UTF-8", "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM": "xterm", "USER": "root"}, "fds": {"0": "da3046cf02c0116febf4ef3fe4eaecdf308e720c05e5a9c730ce1a6f15417f66", "1": "05896879d8692607bd6e4a09475667da3b5f6714418ab0ee0e5720b4c57f754b"}, "interactive": true} - Metadata map[string]any `json:"metadata" yaml:"metadata"` - - // Whether the operation can be canceled - // Example: false - MayCancel bool `json:"may_cancel" yaml:"may_cancel"` - - // Operation error mesage - // Example: Some error message - Err string `json:"err" yaml:"err"` - - // What cluster member this record was found on - // Example: lxd01 - // - // API extension: operation_location - Location string `json:"location" 
yaml:"location"` -} - -// ToCertificateAddToken creates a certificate add token from the operation metadata. -func (op *Operation) ToCertificateAddToken() (*CertificateAddToken, error) { - req, ok := op.Metadata["request"].(map[string]any) - if !ok { - return nil, fmt.Errorf("Operation request is type %T not map[string]any", op.Metadata["request"]) - } - - clientName, ok := req["name"].(string) - if !ok { - return nil, fmt.Errorf("Failed to get client name") - } - - secret, ok := op.Metadata["secret"].(string) - if !ok { - return nil, fmt.Errorf("Operation secret is type %T not string", op.Metadata["secret"]) - } - - fingerprint, ok := op.Metadata["fingerprint"].(string) - if !ok { - return nil, fmt.Errorf("Operation fingerprint is type %T not string", op.Metadata["fingerprint"]) - } - - addresses, ok := op.Metadata["addresses"].([]any) - if !ok { - return nil, fmt.Errorf("Operation addresses is type %T not []any", op.Metadata["addresses"]) - } - - joinToken := CertificateAddToken{ - ClientName: clientName, - Secret: secret, - Fingerprint: fingerprint, - Addresses: make([]string, 0, len(addresses)), - } - - expiresAtStr, ok := op.Metadata["expiresAt"].(string) - if ok { - expiresAt, err := time.Parse(time.RFC3339Nano, expiresAtStr) - if err != nil { - return nil, err - } - - joinToken.ExpiresAt = expiresAt - } - - for i, address := range addresses { - addressString, ok := address.(string) - if !ok { - return nil, fmt.Errorf("Operation address index %d is type %T not string", i, address) - } - - joinToken.Addresses = append(joinToken.Addresses, addressString) - } - - return &joinToken, nil -} - -// ToClusterJoinToken creates a cluster join token from the operation metadata. 
-func (op *Operation) ToClusterJoinToken() (*ClusterMemberJoinToken, error) { - serverName, ok := op.Metadata["serverName"].(string) - if !ok { - return nil, fmt.Errorf("Operation serverName is type %T not string", op.Metadata["serverName"]) - } - - secret, ok := op.Metadata["secret"].(string) - if !ok { - return nil, fmt.Errorf("Operation secret is type %T not string", op.Metadata["secret"]) - } - - fingerprint, ok := op.Metadata["fingerprint"].(string) - if !ok { - return nil, fmt.Errorf("Operation fingerprint is type %T not string", op.Metadata["fingerprint"]) - } - - addresses, ok := op.Metadata["addresses"].([]any) - if !ok { - return nil, fmt.Errorf("Operation addresses is type %T not []any", op.Metadata["addresses"]) - } - - expiresAtStr, ok := op.Metadata["expiresAt"].(string) - if !ok { - return nil, fmt.Errorf("Operation expiresAt is type %T not string", op.Metadata["expiresAt"]) - } - - expiresAt, err := time.Parse(time.RFC3339Nano, expiresAtStr) - if err != nil { - return nil, err - } - - joinToken := ClusterMemberJoinToken{ - ServerName: serverName, - Secret: secret, - Fingerprint: fingerprint, - Addresses: make([]string, 0, len(addresses)), - ExpiresAt: expiresAt, - } - - for i, address := range addresses { - addressString, ok := address.(string) - if !ok { - return nil, fmt.Errorf("Operation address index %d is type %T not string", i, address) - } - - joinToken.Addresses = append(joinToken.Addresses, addressString) - } - - return &joinToken, nil -} diff --git a/vendor/github.com/lxc/lxd/shared/api/profile.go b/vendor/github.com/lxc/lxd/shared/api/profile.go deleted file mode 100644 index 7b063580..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/profile.go +++ /dev/null @@ -1,67 +0,0 @@ -package api - -// ProfilesPost represents the fields of a new LXD profile -// -// swagger:model -type ProfilesPost struct { - ProfilePut `yaml:",inline"` - - // The name of the new profile - // Example: foo - Name string `json:"name" yaml:"name" db:"primary=yes"` 
-} - -// ProfilePost represents the fields required to rename a LXD profile -// -// swagger:model -type ProfilePost struct { - // The new name for the profile - // Example: bar - Name string `json:"name" yaml:"name"` -} - -// ProfilePut represents the modifiable fields of a LXD profile -// -// swagger:model -type ProfilePut struct { - // Instance configuration map (refer to doc/instances.md) - // Example: {"limits.cpu": "4", "limits.memory": "4GiB"} - Config map[string]string `json:"config" yaml:"config"` - - // Description of the profile - // Example: Medium size instances - Description string `json:"description" yaml:"description"` - - // List of devices - // Example: {"root": {"type": "disk", "pool": "default", "path": "/"}, "eth0": {"type": "nic", "network": "lxdbr0", "name": "eth0"}} - Devices map[string]map[string]string `json:"devices" yaml:"devices"` -} - -// Profile represents a LXD profile -// -// swagger:model -type Profile struct { - ProfilePut `yaml:",inline"` - - // The profile name - // Read only: true - // Example: foo - Name string `json:"name" yaml:"name" db:"primary=yes"` - - // List of URLs of objects using this profile - // Read only: true - // Example: ["/1.0/instances/c1", "/1.0/instances/v1"] - // - // API extension: profile_usedby - UsedBy []string `json:"used_by" yaml:"used_by"` -} - -// Writable converts a full Profile struct into a ProfilePut struct (filters read-only fields). -func (profile *Profile) Writable() ProfilePut { - return profile.ProfilePut -} - -// URL returns the URL for the profile. 
-func (profile *Profile) URL(apiVersion string, projectName string) *URL { - return NewURL().Path(apiVersion, "profiles", profile.Name).Project(projectName) -} diff --git a/vendor/github.com/lxc/lxd/shared/api/project.go b/vendor/github.com/lxc/lxd/shared/api/project.go deleted file mode 100644 index 4ed35890..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/project.go +++ /dev/null @@ -1,98 +0,0 @@ -package api - -// ProjectsPost represents the fields of a new LXD project -// -// swagger:model -// -// API extension: projects. -type ProjectsPost struct { - ProjectPut `yaml:",inline"` - - // The name of the new project - // Example: foo - Name string `json:"name" yaml:"name"` -} - -// ProjectPost represents the fields required to rename a LXD project -// -// swagger:model -// -// API extension: projects. -type ProjectPost struct { - // The new name for the project - // Example: bar - Name string `json:"name" yaml:"name"` -} - -// ProjectPut represents the modifiable fields of a LXD project -// -// swagger:model -// -// API extension: projects. -type ProjectPut struct { - // Project configuration map (refer to doc/projects.md) - // Example: {"features.profiles": "true", "features.networks": "false"} - Config map[string]string `json:"config" yaml:"config"` - - // Description of the project - // Example: My new project - Description string `json:"description" yaml:"description"` -} - -// Project represents a LXD project -// -// swagger:model -// -// API extension: projects. 
-type Project struct { - ProjectPut `yaml:",inline"` - - // The project name - // Read only: true - // Example: foo - Name string `json:"name" yaml:"name"` - - // List of URLs of objects using this project - // Read only: true - // Example: ["/1.0/images/0e60015346f06627f10580d56ac7fffd9ea775f6d4f25987217d5eed94910a20", "/1.0/instances/c1", "/1.0/networks/lxdbr0", "/1.0/profiles/default", "/1.0/storage-pools/default/volumes/custom/blah"] - UsedBy []string `json:"used_by" yaml:"used_by"` -} - -// Writable converts a full Project struct into a ProjectPut struct (filters read-only fields) -// -// API extension: projects. -func (project *Project) Writable() ProjectPut { - return project.ProjectPut -} - -// URL returns the URL for the project. -func (project *Project) URL(apiVersion string) *URL { - return NewURL().Path(apiVersion, "projects", project.Name) -} - -// ProjectState represents the current running state of a LXD project -// -// swagger:model -// -// API extension: project_usage. -type ProjectState struct { - // Allocated and used resources - // Read only: true - // Example: {"containers": {"limit": 10, "usage": 4}, "cpu": {"limit": 20, "usage": 16}} - Resources map[string]ProjectStateResource `json:"resources" yaml:"resources"` -} - -// ProjectStateResource represents the state of a particular resource in a LXD project -// -// swagger:model -// -// API extension: project_usage. -type ProjectStateResource struct { - // Limit for the resource (-1 if none) - // Example: 10 - Limit int64 - - // Current usage for the resource - // Example: 4 - Usage int64 -} diff --git a/vendor/github.com/lxc/lxd/shared/api/resource.go b/vendor/github.com/lxc/lxd/shared/api/resource.go deleted file mode 100644 index 0ccd186f..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/resource.go +++ /dev/null @@ -1,1070 +0,0 @@ -package api - -// Resources represents the system resources available for LXD -// -// swagger:model -// -// API extension: resources. 
-type Resources struct { - // CPU information - CPU ResourcesCPU `json:"cpu" yaml:"cpu"` - - // Memory information - Memory ResourcesMemory `json:"memory" yaml:"memory"` - - // GPU devices - // - // API extension: resources_gpu - GPU ResourcesGPU `json:"gpu" yaml:"gpu"` - - // Network devices - // - // API extension: resources_v2 - Network ResourcesNetwork `json:"network" yaml:"network"` - - // Storage devices - // - // API extension: resources_v2 - Storage ResourcesStorage `json:"storage" yaml:"storage"` - - // USB devices - // - // API extension: resources_usb_pci - USB ResourcesUSB `json:"usb" yaml:"usb"` - - // PCI devices - // - // API extension: resources_usb_pci - PCI ResourcesPCI `json:"pci" yaml:"pci"` - - // System information - // - // API extension: resources_system - System ResourcesSystem `json:"system" yaml:"system"` -} - -// ResourcesCPU represents the cpu resources available on the system -// -// swagger:model -// -// API extension: resources. -type ResourcesCPU struct { - // Architecture name - // Example: x86_64 - // - // API extension: resources_v2 - Architecture string `json:"architecture" yaml:"architecture"` - - // List of CPU sockets - Sockets []ResourcesCPUSocket `json:"sockets" yaml:"sockets"` - - // Total number of CPU threads (from all sockets and cores) - // Example: 1 - Total uint64 `json:"total" yaml:"total"` -} - -// ResourcesCPUSocket represents a CPU socket on the system -// -// swagger:model -// -// API extension: resources_v2. 
-type ResourcesCPUSocket struct { - // Product name - // Example: Intel(R) Core(TM) i5-7300U CPU @ 2.60GHz - Name string `json:"name,omitempty" yaml:"name,omitempty"` - - // Vendor name - // Example: GenuineIntel - Vendor string `json:"vendor,omitempty" yaml:"vendor,omitempty"` - - // Socket number - // Example: 0 - Socket uint64 `json:"socket" yaml:"socket"` - - // List of CPU caches - Cache []ResourcesCPUCache `json:"cache,omitempty" yaml:"cache,omitempty"` - - // List of CPU cores - Cores []ResourcesCPUCore `json:"cores" yaml:"cores"` - - // Current CPU frequency (Mhz) - // Example: 3499 - Frequency uint64 `json:"frequency,omitempty" yaml:"frequency,omitempty"` - - // Minimum CPU frequency (Mhz) - // Example: 400 - FrequencyMinimum uint64 `json:"frequency_minimum,omitempty" yaml:"frequency_minimum,omitempty"` - - // Maximum CPU frequency (Mhz) - // Example: 3500 - FrequencyTurbo uint64 `json:"frequency_turbo,omitempty" yaml:"frequency_turbo,omitempty"` -} - -// ResourcesCPUCache represents a CPU cache -// -// swagger:model -// -// API extension: resources_v2. -type ResourcesCPUCache struct { - // Cache level (usually a number from 1 to 3) - // Example: 1 - Level uint64 `json:"level" yaml:"level"` - - // Type of cache (Data, Instruction, Unified, ...) - // Example: Data - Type string `json:"type" yaml:"type"` - - // Size of the cache (in bytes) - // Example: 32768 - Size uint64 `json:"size" yaml:"size"` -} - -// ResourcesCPUCore represents a CPU core on the system -// -// swagger:model -// -// API extension: resources_v2. 
-type ResourcesCPUCore struct { - // Core identifier within the socket - // Example: 0 - Core uint64 `json:"core" yaml:"core"` - - // What die the CPU is a part of (for chiplet designs) - // Example: 0 - // - // API extension: resources_cpu_core_die - Die uint64 `json:"die" yaml:"die"` - - // List of threads - Threads []ResourcesCPUThread `json:"threads" yaml:"threads"` - - // Current frequency - // Example: 3500 - Frequency uint64 `json:"frequency,omitempty" yaml:"frequency,omitempty"` -} - -// ResourcesCPUThread represents a CPU thread on the system -// -// swagger:model -// -// API extension: resources_v2. -type ResourcesCPUThread struct { - // Thread ID (used for CPU pinning) - // Example: 0 - ID int64 `json:"id" yaml:"id"` - - // NUMA node the thread is a part of - // Example: 0 - NUMANode uint64 `json:"numa_node" yaml:"numa_node"` - - // Thread identifier within the core - // Example: 0 - Thread uint64 `json:"thread" yaml:"thread"` - - // Whether the thread is online (enabled) - // Example: true - Online bool `json:"online" yaml:"online"` - - // Whether the thread has been isolated (outside of normal scheduling) - // Example: false - // - // API extension: resource_cpu_isolated - Isolated bool `json:"isolated" yaml:"isolated"` -} - -// ResourcesGPU represents the GPU resources available on the system -// -// swagger:model -// -// API extension: resources_gpu. -type ResourcesGPU struct { - // List of GPUs - Cards []ResourcesGPUCard `json:"cards" yaml:"cards"` - - // Total number of GPUs - // Example: 1 - Total uint64 `json:"total" yaml:"total"` -} - -// ResourcesGPUCard represents a GPU card on the system -// -// swagger:model -// -// API extension: resources_v2. 
-type ResourcesGPUCard struct { - // Kernel driver currently associated with the GPU - // Example: i915 - Driver string `json:"driver,omitempty" yaml:"driver,omitempty"` - - // Version of the kernel driver - // Example: 5.8.0-36-generic - DriverVersion string `json:"driver_version,omitempty" yaml:"driver_version,omitempty"` - - // DRM information (if card is in used by the host) - DRM *ResourcesGPUCardDRM `json:"drm,omitempty" yaml:"drm,omitempty"` - - // SRIOV information (when supported by the card) - SRIOV *ResourcesGPUCardSRIOV `json:"sriov,omitempty" yaml:"sriov,omitempty"` - - // NVIDIA specific information - Nvidia *ResourcesGPUCardNvidia `json:"nvidia,omitempty" yaml:"nvidia,omitempty"` - - // Map of available mediated device profiles - // Example: null - // - // API extension: resources_gpu_mdev - Mdev map[string]ResourcesGPUCardMdev `json:"mdev,omitempty" yaml:"mdev,omitempty"` - - // NUMA node the GPU is a part of - // Example: 0 - NUMANode uint64 `json:"numa_node" yaml:"numa_node"` - - // PCI address - // Example: 0000:00:02.0 - PCIAddress string `json:"pci_address,omitempty" yaml:"pci_address,omitempty"` - - // Name of the vendor - // Example: Intel Corporation - Vendor string `json:"vendor,omitempty" yaml:"vendor,omitempty"` - - // PCI ID of the vendor - // Example: 8086 - VendorID string `json:"vendor_id,omitempty" yaml:"vendor_id,omitempty"` - - // Name of the product - // Example: HD Graphics 620 - Product string `json:"product,omitempty" yaml:"product,omitempty"` - - // PCI ID of the product - // Example: 5916 - ProductID string `json:"product_id,omitempty" yaml:"product_id,omitempty"` - - // USB address (for USB cards) - // Example: 2:7 - // - // API extension: resources_gpu_usb - USBAddress string `json:"usb_address,omitempty" yaml:"usb_address,omitempty"` -} - -// ResourcesGPUCardDRM represents the Linux DRM configuration of the GPU -// -// swagger:model -// -// API extension: resources_v2. 
-type ResourcesGPUCardDRM struct { - // DRM card ID - // Example: 0 - ID uint64 `json:"id" yaml:"id"` - - // Card device name - // Example: card0 - CardName string `json:"card_name" yaml:"card_name"` - - // Card device number - // Example: 226:0 - CardDevice string `json:"card_device" yaml:"card_device"` - - // Control device name - // Example: controlD64 - ControlName string `json:"control_name,omitempty" yaml:"control_name,omitempty"` - - // Control device number - // Example: 226:0 - ControlDevice string `json:"control_device,omitempty" yaml:"control_device,omitempty"` - - // Render device name - // Example: renderD128 - RenderName string `json:"render_name,omitempty" yaml:"render_name,omitempty"` - - // Render device number - // Example: 226:128 - RenderDevice string `json:"render_device,omitempty" yaml:"render_device,omitempty"` -} - -// ResourcesGPUCardSRIOV represents the SRIOV configuration of the GPU -// -// swagger:model -// -// API extension: resources_v2. -type ResourcesGPUCardSRIOV struct { - // Number of VFs currently configured - // Example: 0 - CurrentVFs uint64 `json:"current_vfs" yaml:"current_vfs"` - - // Maximum number of supported VFs - // Example: 0 - MaximumVFs uint64 `json:"maximum_vfs" yaml:"maximum_vfs"` - - // List of VFs (as additional GPU devices) - // Example: null - VFs []ResourcesGPUCard `json:"vfs" yaml:"vfs"` -} - -// ResourcesGPUCardNvidia represents additional information for NVIDIA GPUs -// -// swagger:model -// -// API extension: resources_gpu. 
-type ResourcesGPUCardNvidia struct { - // Version of the CUDA API - // Example: 11.0 - CUDAVersion string `json:"cuda_version,omitempty" yaml:"cuda_version,omitempty"` - - // Version of the NVRM (usually driver version) - // Example: 450.102.04 - NVRMVersion string `json:"nvrm_version,omitempty" yaml:"nvrm_version,omitempty"` - - // Brand name - // Example: GeForce - Brand string `json:"brand" yaml:"brand"` - - // Model name - // Example: GeForce GT 730 - Model string `json:"model" yaml:"model"` - - // GPU UUID - // Example: GPU-6ddadebd-dafe-2db9-f10f-125719770fd3 - UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"` - - // Architecture (generation) - // Example: 3.5 - Architecture string `json:"architecture,omitempty" yaml:"architecture,omitempty"` - - // Card device name - // Example: nvidia0 - // - // API extension: resources_v2 - CardName string `json:"card_name" yaml:"card_name"` - - // Card device number - // Example: 195:0 - // - // API extension: resources_v2 - CardDevice string `json:"card_device" yaml:"card_device"` -} - -// ResourcesGPUCardMdev represents the mediated devices configuration of the GPU -// -// swagger:model -// -// API extension: resources_gpu_mdev. 
-type ResourcesGPUCardMdev struct { - // The mechanism used by this device - // Example: vfio-pci - API string `json:"api" yaml:"api"` - - // Number of available devices of this profile - // Example: 2 - Available uint64 `json:"available" yaml:"available"` - - // Profile name - // Example: i915-GVTg_V5_8 - Name string `json:"name,omitempty" yaml:"name,omitempty"` - - // Profile description - // Example: low_gm_size: 128MB\nhigh_gm_size: 512MB\nfence: 4\nresolution: 1920x1200\nweight: 4 - Description string `json:"description,omitempty" yaml:"description,omitempty"` - - // List of active devices (UUIDs) - // Example: ["42200aac-0977-495c-8c9e-6c51b9092a01", "b4950c00-1437-41d9-88f6-28d61cf9b9ef"] - Devices []string `json:"devices" yaml:"devices"` -} - -// ResourcesNetwork represents the network cards available on the system -// -// swagger:model -// -// API extension: resources_v2. -type ResourcesNetwork struct { - // List of network cards - Cards []ResourcesNetworkCard `json:"cards" yaml:"cards"` - - // Total number of network cards - // Example: 1 - Total uint64 `json:"total" yaml:"total"` -} - -// ResourcesNetworkCard represents a network card on the system -// -// swagger:model -// -// API extension: resources_v2. 
-type ResourcesNetworkCard struct { - // Kernel driver currently associated with the card - // Example: atlantic - Driver string `json:"driver,omitempty" yaml:"driver,omitempty"` - - // Version of the kernel driver - // Example: 5.8.0-36-generic - DriverVersion string `json:"driver_version,omitempty" yaml:"driver_version,omitempty"` - - // List of ports on the card - Ports []ResourcesNetworkCardPort `json:"ports,omitempty" yaml:"ports,omitempty"` - - // SRIOV information (when supported by the card) - SRIOV *ResourcesNetworkCardSRIOV `json:"sriov,omitempty" yaml:"sriov,omitempty"` - - // NUMA node the card is a part of - // Example: 0 - NUMANode uint64 `json:"numa_node" yaml:"numa_node"` - - // PCI address (for PCI cards) - // Example: 0000:0d:00.0 - PCIAddress string `json:"pci_address,omitempty" yaml:"pci_address,omitempty"` - - // Name of the vendor - // Example: Aquantia Corp. - Vendor string `json:"vendor,omitempty" yaml:"vendor,omitempty"` - - // PCI ID of the vendor - // Example: 1d6a - VendorID string `json:"vendor_id,omitempty" yaml:"vendor_id,omitempty"` - - // Name of the product - // Example: AQC107 NBase-T/IEEE - Product string `json:"product,omitempty" yaml:"product,omitempty"` - - // PCI ID of the product - // Example: 87b1 - ProductID string `json:"product_id,omitempty" yaml:"product_id,omitempty"` - - // Current firmware version - // Example: 3.1.100 - // - // API extension: resources_network_firmware - FirmwareVersion string `json:"firmware_version,omitempty" yaml:"firmware_version,omitempty"` - - // USB address (for USB cards) - // Example: 2:7 - // - // API extension: resources_network_usb - USBAddress string `json:"usb_address,omitempty" yaml:"usb_address,omitempty"` -} - -// ResourcesNetworkCardPort represents a network port on the system -// -// swagger:model -// -// API extension: resources_v2. 
-type ResourcesNetworkCardPort struct { - // Port identifier (interface name) - // Example: eth0 - ID string `json:"id" yaml:"id"` - - // MAC address - // Example: 00:23:a4:01:01:6f - Address string `json:"address,omitempty" yaml:"address,omitempty"` - - // Port number - // Example: 0 - Port uint64 `json:"port" yaml:"port"` - - // Transport protocol - // Example: ethernet - Protocol string `json:"protocol" yaml:"protocol"` - - // List of supported modes - // Example: ["100baseT/Full", "1000baseT/Full", "2500baseT/Full", "5000baseT/Full", "10000baseT/Full"] - SupportedModes []string `json:"supported_modes,omitempty" yaml:"supported_modes,omitempty"` - - // List of supported port types - // Example: ["twisted pair"] - SupportedPorts []string `json:"supported_ports,omitempty" yaml:"supported_ports,omitempty"` - - // Current port type - // Example: twisted pair - PortType string `json:"port_type,omitempty" yaml:"port_type,omitempty"` - - // Type of transceiver used - // Example: internal - TransceiverType string `json:"transceiver_type,omitempty" yaml:"transceiver_type,omitempty"` - - // Whether auto negotiation is used - // Example: true - AutoNegotiation bool `json:"auto_negotiation" yaml:"auto_negotiation"` - - // Whether a link was detected - // Example: true - LinkDetected bool `json:"link_detected" yaml:"link_detected"` - - // Current speed (Mbit/s) - // Example: 10000 - LinkSpeed uint64 `json:"link_speed,omitempty" yaml:"link_speed,omitempty"` - - // Duplex type - // Example: full - LinkDuplex string `json:"link_duplex,omitempty" yaml:"link_duplex,omitempty"` - - // Additional information for infiniband devices - // - // API extension: resources_infiniband - Infiniband *ResourcesNetworkCardPortInfiniband `json:"infiniband,omitempty" yaml:"infiniband,omitempty"` -} - -// ResourcesNetworkCardPortInfiniband represents the Linux Infiniband configuration for the port -// -// swagger:model -// -// API extension: resources_infiniband. 
-type ResourcesNetworkCardPortInfiniband struct { - // ISSM device name - // Example: issm0 - IsSMName string `json:"issm_name,omitempty" yaml:"issm_name,omitempty"` - - // ISSM device number - // Example: 231:64 - IsSMDevice string `json:"issm_device,omitempty" yaml:"issm_device,omitempty"` - - // MAD device name - // Example: umad0 - MADName string `json:"mad_name,omitempty" yaml:"mad_name,omitempty"` - - // MAD device number - // Example: 231:0 - MADDevice string `json:"mad_device,omitempty" yaml:"mad_device,omitempty"` - - // Verb device name - // Example: uverbs0 - VerbName string `json:"verb_name,omitempty" yaml:"verb_name,omitempty"` - - // Verb device number - // Example: 231:192 - VerbDevice string `json:"verb_device,omitempty" yaml:"verb_device,omitempty"` -} - -// ResourcesNetworkCardSRIOV represents the SRIOV configuration of the network card -// -// swagger:model -// -// API extension: resources_v2. -type ResourcesNetworkCardSRIOV struct { - // Number of VFs currently configured - // Example: 0 - CurrentVFs uint64 `json:"current_vfs" yaml:"current_vfs"` - - // Maximum number of supported VFs - // Example: 0 - MaximumVFs uint64 `json:"maximum_vfs" yaml:"maximum_vfs"` - - // List of VFs (as additional Network devices) - // Example: null - VFs []ResourcesNetworkCard `json:"vfs" yaml:"vfs"` -} - -// ResourcesStorage represents the local storage -// -// swagger:model -// -// API extension: resources_v2. -type ResourcesStorage struct { - // List of disks - Disks []ResourcesStorageDisk `json:"disks" yaml:"disks"` - - // Total number of partitions - // Example: 1 - Total uint64 `json:"total" yaml:"total"` -} - -// ResourcesStorageDisk represents a disk -// -// swagger:model -// -// API extension: resources_v2. 
-type ResourcesStorageDisk struct { - // ID of the disk (device name) - // Example: nvme0n1 - ID string `json:"id" yaml:"id"` - - // Device number - // Example: 259:0 - Device string `json:"device" yaml:"device"` - - // Disk model name - // Example: INTEL SSDPEKKW256G7 - Model string `json:"model,omitempty" yaml:"model,omitempty"` - - // Storage type - // Example: nvme - Type string `json:"type,omitempty" yaml:"type,omitempty"` - - // Whether the disk is read-only - // Example: false - ReadOnly bool `json:"read_only" yaml:"read_only"` - - // Total size of the disk (bytes) - // Example: 256060514304 - Size uint64 `json:"size" yaml:"size"` - - // Whether the disk is removable (hot-plug) - // Example: false - Removable bool `json:"removable" yaml:"removable"` - - // WWN identifier - // Example: eui.0000000001000000e4d25cafae2e4c00 - WWN string `json:"wwn,omitempty" yaml:"wwn,omitempty"` - - // NUMA node the disk is a part of - // Example: 0 - NUMANode uint64 `json:"numa_node" yaml:"numa_node"` - - // Device by-path identifier - // Example: pci-0000:05:00.0-nvme-1 - // - // API extension: resources_disk_sata - DevicePath string `json:"device_path,omitempty" yaml:"device_path,omitempty"` - - // Block size - // Example: 512 - // - // API extension: resources_disk_sata - BlockSize uint64 `json:"block_size" yaml:"block_size"` - - // Current firmware version - // Example: PSF121C - // - // API extension: resources_disk_sata - FirmwareVersion string `json:"firmware_version,omitempty" yaml:"firmware_version,omitempty"` - - // Rotation speed (RPM) - // Example: 0 - // - // API extension: resources_disk_sata - RPM uint64 `json:"rpm" yaml:"rpm"` - - // Serial number - // Example: BTPY63440ARH256D - // - // API extension: resources_disk_sata - Serial string `json:"serial,omitempty" yaml:"serial,omitempty"` - - // Device by-id identifier - // Example: nvme-eui.0000000001000000e4d25cafae2e4c00 - // - // API extension: resources_disk_id - DeviceID string `json:"device_id" 
yaml:"device_id"` - - // List of partitions - Partitions []ResourcesStorageDiskPartition `json:"partitions" yaml:"partitions"` - - // PCI address - // Example: 0000:05:00.0 - // - // API extension: resources_disk_address - PCIAddress string `json:"pci_address,omitempty" yaml:"pci_address,omitempty"` - - // USB address - // Example: 3:5 - // - // API extension: resources_disk_address - USBAddress string `json:"usb_address,omitempty" yaml:"usb_address,omitempty"` -} - -// ResourcesStorageDiskPartition represents a partition on a disk -// -// swagger:model -// -// API extension: resources_v2. -type ResourcesStorageDiskPartition struct { - // ID of the partition (device name) - // Example: nvme0n1p1 - ID string `json:"id" yaml:"id"` - - // Device number - // Example: 259:1 - Device string `json:"device" yaml:"device"` - - // Whether the partition is read-only - // Example: false - ReadOnly bool `json:"read_only" yaml:"read_only"` - - // Size of the partition (bytes) - // Example: 254933278208 - Size uint64 `json:"size" yaml:"size"` - - // Partition number - // Example: 1 - Partition uint64 `json:"partition" yaml:"partition"` -} - -// ResourcesMemory represents the memory resources available on the system -// -// swagger:model -// -// API extension: resources. 
-type ResourcesMemory struct { - // List of NUMA memory nodes - // Example: null - // - // API extension: resources_v2 - Nodes []ResourcesMemoryNode `json:"nodes,omitempty" yaml:"nodes,omitempty"` - - // Total of memory huge pages (bytes) - // Example: 429284917248 - HugepagesTotal uint64 `json:"hugepages_total" yaml:"hugepages_total"` - - // Used memory huge pages (bytes) - // Example: 429284917248 - HugepagesUsed uint64 `json:"hugepages_used" yaml:"hugepages_used"` - - // Size of memory huge pages (bytes) - // Example: 2097152 - HugepagesSize uint64 `json:"hugepages_size" yaml:"hugepages_size"` - - // Used system memory (bytes) - // Example: 557450502144 - Used uint64 `json:"used" yaml:"used"` - - // Total system memory (bytes) - // Example: 687194767360 - Total uint64 `json:"total" yaml:"total"` -} - -// ResourcesMemoryNode represents the node-specific memory resources available on the system -// -// swagger:model -// -// API extension: resources_v2. -type ResourcesMemoryNode struct { - // NUMA node identifier - // Example: 0 - NUMANode uint64 `json:"numa_node" yaml:"numa_node"` - - // Used memory huge pages (bytes) - // Example: 214536552448 - HugepagesUsed uint64 `json:"hugepages_used" yaml:"hugepages_used"` - - // Total of memory huge pages (bytes) - // Example: 214536552448 - HugepagesTotal uint64 `json:"hugepages_total" yaml:"hugepages_total"` - - // Used system memory (bytes) - // Example: 264880439296 - Used uint64 `json:"used" yaml:"used"` - - // Total system memory (bytes) - // Example: 343597383680 - Total uint64 `json:"total" yaml:"total"` -} - -// ResourcesStoragePool represents the resources available to a given storage pool -// -// swagger:model -// -// API extension: resources. 
-type ResourcesStoragePool struct { - // Disk space usage - Space ResourcesStoragePoolSpace `json:"space,omitempty" yaml:"space,omitempty"` - - // DIsk inode usage - Inodes ResourcesStoragePoolInodes `json:"inodes,omitempty" yaml:"inodes,omitempty"` -} - -// ResourcesStoragePoolSpace represents the space available to a given storage pool -// -// swagger:model -// -// API extension: resources. -type ResourcesStoragePoolSpace struct { - // Used disk space (bytes) - // Example: 343537419776 - Used uint64 `json:"used,omitempty" yaml:"used,omitempty"` - - // Total disk space (bytes) - // Example: 420100937728 - Total uint64 `json:"total" yaml:"total"` -} - -// ResourcesStoragePoolInodes represents the inodes available to a given storage pool -// -// swagger:model -// -// API extension: resources. -type ResourcesStoragePoolInodes struct { - // Used inodes - // Example: 23937695 - Used uint64 `json:"used" yaml:"used"` - - // Total inodes - // Example: 30709993797 - Total uint64 `json:"total" yaml:"total"` -} - -// ResourcesUSB represents the USB devices available on the system -// -// swagger:model -// -// API extension: resources_usb_pci. -type ResourcesUSB struct { - // List of USB devices - Devices []ResourcesUSBDevice `json:"devices" yaml:"devices"` - - // Total number of USB devices - // Example: 1 - Total uint64 `json:"total" yaml:"total"` -} - -// ResourcesUSBDevice represents a USB device -// -// swagger:model -// -// API extension: resources_usb_pci. 
-type ResourcesUSBDevice struct { - // USB address (bus) - // Example: 1 - BusAddress uint64 `json:"bus_address" yaml:"bus_address"` - - // USB address (device) - // Example: 3 - DeviceAddress uint64 `json:"device_address" yaml:"device_address"` - - // List of USB interfaces - Interfaces []ResourcesUSBDeviceInterface `json:"interfaces" yaml:"interfaces"` - - // Name of the vendor - // Example: ATEN International Co., Ltd - Vendor string `json:"vendor" yaml:"vendor"` - - // USB ID of the vendor - // Example: 0557 - VendorID string `json:"vendor_id" yaml:"vendor_id"` - - // Name of the product - // Example: Hermon USB hidmouse Device - Product string `json:"product" yaml:"product"` - - // USB ID of the product - // Example: 2221 - ProductID string `json:"product_id" yaml:"product_id"` - - // Transfer speed (Mbit/s) - // Example: 12 - Speed float64 `json:"speed" yaml:"speed"` -} - -// ResourcesUSBDeviceInterface represents a USB device interface -// -// swagger:model -// -// API extension: resources_usb_pci. 
-type ResourcesUSBDeviceInterface struct { - // Class of USB interface - // Example: Human Interface Device - Class string `json:"class" yaml:"class"` - - // ID of the USB interface class - // Example: 3 - ClassID uint64 `json:"class_id" yaml:"class_id"` - - // Kernel driver currently associated with the device - // Example: usbhid - Driver string `json:"driver" yaml:"driver"` - - // Version of the kernel driver - // Example: 5.8.0-36-generic - DriverVersion string `json:"driver_version" yaml:"driver_version"` - - // Interface number - // Example: 0 - Number uint64 `json:"number" yaml:"number"` - - // Sub class of the interface - // Example: Boot Interface Subclass - SubClass string `json:"subclass" yaml:"subclass"` - - // ID of the USB interface sub class - // Example: 1 - SubClassID uint64 `json:"subclass_id" yaml:"subclass_id"` -} - -// ResourcesPCI represents the PCI devices available on the system -// -// swagger:model -// -// API extension: resources_usb_pci. -type ResourcesPCI struct { - // List of PCI devices - Devices []ResourcesPCIDevice `json:"devices" yaml:"devices"` - - // Total number of PCI devices - // Example: 1 - Total uint64 `json:"total" yaml:"total"` -} - -// ResourcesPCIDevice represents a PCI device -// -// swagger:model -// -// API extension: resources_usb_pci. -type ResourcesPCIDevice struct { - // Kernel driver currently associated with the GPU - // Example: mgag200 - Driver string `json:"driver" yaml:"driver"` - - // Version of the kernel driver - // Example: 5.8.0-36-generic - DriverVersion string `json:"driver_version" yaml:"driver_version"` - - // NUMA node the card is a part of - // Example: 0 - NUMANode uint64 `json:"numa_node" yaml:"numa_node"` - - // PCI address - // Example: 0000:07:03.0 - PCIAddress string `json:"pci_address" yaml:"pci_address"` - - // Name of the vendor - // Example: Matrox Electronics Systems Ltd. 
- Vendor string `json:"vendor" yaml:"vendor"` - - // PCI ID of the vendor - // Example: 102b - VendorID string `json:"vendor_id" yaml:"vendor_id"` - - // Name of the product - // Example: MGA G200eW WPCM450 - Product string `json:"product" yaml:"product"` - - // PCI ID of the product - // Example: 0532 - ProductID string `json:"product_id" yaml:"product_id"` - - // IOMMU group number - // Example: 20 - // - // API extension: resources_pci_iommu - IOMMUGroup uint64 `json:"iommu_group" yaml:"iommu_group"` - - // Vital Product Data - // Example: - // - // API extension: resources_pci_vpd - VPD ResourcesPCIVPD `json:"vpd" yaml:"vpd"` -} - -// ResourcesPCIVPD represents VPD entries for a device -// -// swagger:model -// -// API extension: resources_pci_vpd. -type ResourcesPCIVPD struct { - // Hardware provided product name. - // Example: HP Ethernet 1Gb 4-port 331i Adapter - ProductName string `json:"product_name,omitempty" yaml:"product_name,omitempty"` - - // Dict of vendor provided key/value pairs. - // Example: {"EC": ""A-5545", "MN": "103C", "V0": "5W PCIeGen2"} - Entries map[string]string `json:"entries,omitempty" yaml:"entries,omitempty"` -} - -// ResourcesSystem represents the system -// -// swagger:model -// -// API extension: resources_system. 
-type ResourcesSystem struct { - // System UUID - // Example: 7fa1c0cc-2271-11b2-a85c-aab32a05d71a - UUID string `json:"uuid" yaml:"uuid"` - - // System vendor - // Example: LENOVO - Vendor string `json:"vendor" yaml:"vendor"` - - // System model - // Example: 20HRCTO1WW - Product string `json:"product" yaml:"product"` - - // System family - // Example: ThinkPad X1 Carbon 5th - Family string `json:"family" yaml:"family"` - - // System version - // Example: ThinkPad X1 Carbon 5th - Version string `json:"version" yaml:"version"` - - // System nanufacturer SKU - // LENOVO_MT_20HR_BU_Think_FM_ThinkPad X1 Carbon 5th - Sku string `json:"sku" yaml:"sku"` - - // System serial number - // Example: PY3DD4X9 - Serial string `json:"serial" yaml:"serial"` - - // System type (unknown, physical, virtual-machine, container, ...) - // Example: physical - Type string `json:"type" yaml:"type"` - - // Firmware details - Firmware *ResourcesSystemFirmware `json:"firmware" yaml:"firmware"` - - // Chassis details - Chassis *ResourcesSystemChassis `json:"chassis" yaml:"chassis"` - - // Motherboard details - Motherboard *ResourcesSystemMotherboard `json:"motherboard" yaml:"motherboard"` -} - -// ResourcesSystemFirmware represents the system firmware -// -// swagger:model -// -// API extension: resources_system. -type ResourcesSystemFirmware struct { - // Firmware vendor - // Example: Lenovo - Vendor string `json:"vendor" yaml:"vendor"` - - // Firmware build date - // Example: 10/14/2020 - Date string `json:"date" yaml:"date"` - - // Firmware version - // Example: N1MET64W (1.49) - Version string `json:"version" yaml:"version"` -} - -// ResourcesSystemChassis represents the system chassis -// -// swagger:model -// -// API extension: resources_system. 
-type ResourcesSystemChassis struct { - // Chassis vendor - // Example: Lenovo - Vendor string `json:"vendor" yaml:"vendor"` - - // Chassis type - // Example: Notebook - Type string `json:"type" yaml:"type"` - - // Chassis serial number - // Example: PY3DD4X9 - Serial string `json:"serial" yaml:"serial"` - - // Chassis version/revision - // Example: None - Version string `json:"version" yaml:"version"` -} - -// ResourcesSystemMotherboard represents the motherboard -// -// swagger:model -// -// API extension: resources_system. -type ResourcesSystemMotherboard struct { - // Motherboard vendor - // Example: Lenovo - Vendor string `json:"vendor" yaml:"vendor"` - - // Motherboard model - // Example: 20HRCTO1WW - Product string `json:"product" yaml:"product"` - - // Motherboard serial number - // Example: L3CF4FX003A - Serial string `json:"serial" yaml:"serial"` - - // Motherboard version/revision - // Example: None - Version string `json:"version" yaml:"version"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/response.go b/vendor/github.com/lxc/lxd/shared/api/response.go deleted file mode 100644 index 00718491..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/response.go +++ /dev/null @@ -1,90 +0,0 @@ -package api - -import ( - "encoding/json" -) - -// ResponseRaw represents a LXD operation in its original form. -type ResponseRaw struct { - Type ResponseType `json:"type" yaml:"type"` - - // Valid only for Sync responses - Status string `json:"status" yaml:"status"` - StatusCode int `json:"status_code" yaml:"status_code"` - - // Valid only for Async responses - Operation string `json:"operation" yaml:"operation"` - - // Valid only for Error responses - Code int `json:"error_code" yaml:"error_code"` - Error string `json:"error" yaml:"error"` - - Metadata any `json:"metadata" yaml:"metadata"` -} - -// Response represents a LXD operation. 
-type Response struct { - Type ResponseType `json:"type" yaml:"type"` - - // Valid only for Sync responses - Status string `json:"status" yaml:"status"` - StatusCode int `json:"status_code" yaml:"status_code"` - - // Valid only for Async responses - Operation string `json:"operation" yaml:"operation"` - - // Valid only for Error responses - Code int `json:"error_code" yaml:"error_code"` - Error string `json:"error" yaml:"error"` - - // Valid for Sync and Error responses - Metadata json.RawMessage `json:"metadata" yaml:"metadata"` -} - -// MetadataAsMap parses the Response metadata into a map. -func (r *Response) MetadataAsMap() (map[string]any, error) { - ret := map[string]any{} - err := r.MetadataAsStruct(&ret) - if err != nil { - return nil, err - } - - return ret, nil -} - -// MetadataAsOperation turns the Response metadata into an Operation. -func (r *Response) MetadataAsOperation() (*Operation, error) { - op := Operation{} - err := r.MetadataAsStruct(&op) - if err != nil { - return nil, err - } - - return &op, nil -} - -// MetadataAsStringSlice parses the Response metadata into a slice of string. -func (r *Response) MetadataAsStringSlice() ([]string, error) { - sl := []string{} - err := r.MetadataAsStruct(&sl) - if err != nil { - return nil, err - } - - return sl, nil -} - -// MetadataAsStruct parses the Response metadata into a provided struct. -func (r *Response) MetadataAsStruct(target any) error { - return json.Unmarshal(r.Metadata, &target) -} - -// ResponseType represents a valid LXD response type. -type ResponseType string - -// LXD response types. 
-const ( - SyncResponse ResponseType = "sync" - AsyncResponse ResponseType = "async" - ErrorResponse ResponseType = "error" -) diff --git a/vendor/github.com/lxc/lxd/shared/api/server.go b/vendor/github.com/lxc/lxd/shared/api/server.go deleted file mode 100644 index b1fcef2b..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/server.go +++ /dev/null @@ -1,206 +0,0 @@ -package api - -// ServerEnvironment represents the read-only environment fields of a LXD server. -type ServerEnvironment struct { - // List of addresses the server is listening on - // Example: [":8443"] - Addresses []string `json:"addresses" yaml:"addresses"` - - // List of architectures supported by the server - // Example: ["x86_64", "i686"] - Architectures []string `json:"architectures" yaml:"architectures"` - - // Server certificate as PEM encoded X509 - // Example: X509 PEM certificate - Certificate string `json:"certificate" yaml:"certificate"` - - // Server certificate fingerprint as SHA256 - // Example: fd200419b271f1dc2a5591b693cc5774b7f234e1ff8c6b78ad703b6888fe2b69 - CertificateFingerprint string `json:"certificate_fingerprint" yaml:"certificate_fingerprint"` - - // List of supported instance drivers (separate by " | ") - // Example: lxc | qemu - Driver string `json:"driver" yaml:"driver"` - - // List of supported instance driver versions (separate by " | ") - // Example: 4.0.7 | 5.2.0 - DriverVersion string `json:"driver_version" yaml:"driver_version"` - - // Current firewall driver - // Example: nftables - // - // API extension: firewall_driver - Firewall string `json:"firewall" yaml:"firewall"` - - // OS kernel name - // Example: Linux - Kernel string `json:"kernel" yaml:"kernel"` - - // OS kernel architecture - // Example: x86_64 - KernelArchitecture string `json:"kernel_architecture" yaml:"kernel_architecture"` - - // Map of kernel features that were tested on startup - // Example: {"netnsid_getifaddrs": "true", "seccomp_listener": "true"} - // - // API extension: kernel_features - 
KernelFeatures map[string]string `json:"kernel_features" yaml:"kernel_features"` - - // Kernel version - // Example: 5.4.0-36-generic - KernelVersion string `json:"kernel_version" yaml:"kernel_version"` - - // Map of LXC features that were tested on startup - // Example: {"cgroup2": "true", "devpts_fd": "true", "pidfd": "true"} - // - // API extension: lxc_features - LXCFeatures map[string]string `json:"lxc_features" yaml:"lxc_features"` - - // Name of the operating system (Linux distribution) - // Example: Ubuntu - // - // API extension: api_os - OSName string `json:"os_name" yaml:"os_name"` - - // Version of the operating system (Linux distribution) - // Example: 22.04 - // - // API extension: api_os - OSVersion string `json:"os_version" yaml:"os_version"` - - // Current project name - // Example: default - // - // API extension: projects - Project string `json:"project" yaml:"project"` - - // Server implementation name - // Example: lxd - Server string `json:"server" yaml:"server"` - - // Whether the server is part of a cluster - // Example: false - // - // API extension: clustering - ServerClustered bool `json:"server_clustered" yaml:"server_clustered"` - - // Mode that the event distribution subsystem is operating in on this server. - // Either "full-mesh", "hub-server" or "hub-client". 
- // Example: full-mesh - // - // API extension: event_hub - ServerEventMode string `json:"server_event_mode" yaml:"server_event_mode"` - - // Server hostname - // Example: castiana - // - // API extension: clustering - ServerName string `json:"server_name" yaml:"server_name"` - - // PID of the LXD process - // Example: 1453969 - ServerPid int `json:"server_pid" yaml:"server_pid"` - - // Server version - // Example: 4.11 - ServerVersion string `json:"server_version" yaml:"server_version"` - - // List of active storage drivers (separate by " | ") - // Example: dir | zfs - Storage string `json:"storage" yaml:"storage"` - - // List of active storage driver versions (separate by " | ") - // Example: 1 | 0.8.4-1ubuntu11 - StorageVersion string `json:"storage_version" yaml:"storage_version"` - - // List of supported storage drivers - StorageSupportedDrivers []ServerStorageDriverInfo `json:"storage_supported_drivers" yaml:"storage_supported_drivers"` -} - -// ServerStorageDriverInfo represents the read-only info about a storage driver -// -// swagger:model -// -// API extension: server_supported_storage_drivers. 
-type ServerStorageDriverInfo struct { - // Name of the driver - // Example: zfs - // - // API extension: server_supported_storage_drivers - Name string - - // Version of the driver - // Example: 0.8.4-1ubuntu11 - // - // API extension: server_supported_storage_drivers - Version string - - // Whether the driver has remote volumes - // Example: false - // - // API extension: server_supported_storage_drivers - Remote bool -} - -// ServerPut represents the modifiable fields of a LXD server configuration -// -// swagger:model -type ServerPut struct { - // Server configuration map (refer to doc/server.md) - // Example: {"core.https_address": ":8443", "core.trust_password": true} - Config map[string]any `json:"config" yaml:"config"` -} - -// ServerUntrusted represents a LXD server for an untrusted client -// -// swagger:model -type ServerUntrusted struct { - // List of supported API extensions - // Read only: true - // Example: ["etag", "patch", "network", "storage"] - APIExtensions []string `json:"api_extensions" yaml:"api_extensions"` - - // Support status of the current API (one of "devel", "stable" or "deprecated") - // Read only: true - // Example: stable - APIStatus string `json:"api_status" yaml:"api_status"` - - // API version number - // Read only: true - // Example: 1.0 - APIVersion string `json:"api_version" yaml:"api_version"` - - // Whether the client is trusted (one of "trusted" or "untrusted") - // Read only: true - // Example: untrusted - Auth string `json:"auth" yaml:"auth"` - - // Whether the server is public-only (only public endpoints are implemented) - // Read only: true - // Example: false - Public bool `json:"public" yaml:"public"` - - // List of supported authentication methods - // Read only: true - // Example: ["tls", "candid"] - // - // API extension: macaroon_authentication - AuthMethods []string `json:"auth_methods" yaml:"auth_methods"` -} - -// Server represents a LXD server -// -// swagger:model -type Server struct { - ServerPut 
`yaml:",inline"` - ServerUntrusted `yaml:",inline"` - - // Read-only status/configuration information - // Read only: true - Environment ServerEnvironment `json:"environment" yaml:"environment"` -} - -// Writable converts a full Server struct into a ServerPut struct (filters read-only fields). -func (srv *Server) Writable() ServerPut { - return srv.ServerPut -} diff --git a/vendor/github.com/lxc/lxd/shared/api/status_code.go b/vendor/github.com/lxc/lxd/shared/api/status_code.go deleted file mode 100644 index 1f6bb73b..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/status_code.go +++ /dev/null @@ -1,69 +0,0 @@ -package api - -// StatusCode represents a valid LXD operation and container status. -type StatusCode int - -// LXD status codes. -const ( - OperationCreated StatusCode = 100 - Started StatusCode = 101 - Stopped StatusCode = 102 - Running StatusCode = 103 - Cancelling StatusCode = 104 - Pending StatusCode = 105 - Starting StatusCode = 106 - Stopping StatusCode = 107 - Aborting StatusCode = 108 - Freezing StatusCode = 109 - Frozen StatusCode = 110 - Thawed StatusCode = 111 - Error StatusCode = 112 - Ready StatusCode = 113 - - Success StatusCode = 200 - - Failure StatusCode = 400 - Cancelled StatusCode = 401 -) - -// StatusCodeNames associates a status code to its name. -var StatusCodeNames = map[StatusCode]string{ - OperationCreated: "Operation created", - Started: "Started", - Stopped: "Stopped", - Running: "Running", - Cancelling: "Cancelling", - Pending: "Pending", - Success: "Success", - Failure: "Failure", - Cancelled: "Cancelled", - Starting: "Starting", - Stopping: "Stopping", - Aborting: "Aborting", - Freezing: "Freezing", - Frozen: "Frozen", - Thawed: "Thawed", - Error: "Error", - Ready: "Ready", -} - -// String returns a suitable string representation for the status code. -func (o StatusCode) String() string { - return StatusCodeNames[o] -} - -// IsFinal will return true if the status code indicates an end state. 
-func (o StatusCode) IsFinal() bool { - return int(o) >= 200 -} - -// StatusCodeFromString returns the status code of the giving status name. -func StatusCodeFromString(status string) StatusCode { - for k, v := range StatusCodeNames { - if v == status { - return k - } - } - - return -1 -} diff --git a/vendor/github.com/lxc/lxd/shared/api/storage_pool.go b/vendor/github.com/lxc/lxd/shared/api/storage_pool.go deleted file mode 100644 index cdd38509..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/storage_pool.go +++ /dev/null @@ -1,100 +0,0 @@ -package api - -// StoragePoolStatusPending storage pool is pending creation on other cluster nodes. -const StoragePoolStatusPending = "Pending" - -// StoragePoolStatusCreated storage pool is fully created. -const StoragePoolStatusCreated = "Created" - -// StoragePoolStatusErrored storage pool is in error status. -const StoragePoolStatusErrored = "Errored" - -// StoragePoolStatusUnknown storage pool is in unknown status. -const StoragePoolStatusUnknown = "Unknown" - -// StoragePoolStatusUnvailable storage pool failed to initialize. -const StoragePoolStatusUnvailable = "Unavailable" - -// StoragePoolsPost represents the fields of a new LXD storage pool -// -// swagger:model -// -// API extension: storage. -type StoragePoolsPost struct { - StoragePoolPut `yaml:",inline"` - - // Storage pool name - // Example: local - Name string `json:"name" yaml:"name"` - - // Storage pool driver (btrfs, ceph, cephfs, dir, lvm or zfs) - // Example: zfs - Driver string `json:"driver" yaml:"driver"` -} - -// StoragePool represents the fields of a LXD storage pool. -// -// swagger:model -// -// API extension: storage. 
-type StoragePool struct { - StoragePoolPut `yaml:",inline"` - - // Storage pool name - // Example: local - Name string `json:"name" yaml:"name"` - - // Storage pool driver (btrfs, ceph, cephfs, dir, lvm or zfs) - // Example: zfs - Driver string `json:"driver" yaml:"driver"` - - // List of URLs of objects using this storage pool - // Example: ["/1.0/profiles/default", "/1.0/instances/c1"] - UsedBy []string `json:"used_by" yaml:"used_by"` - - // Pool status (Pending, Created, Errored or Unknown) - // Read only: true - // Example: Created - // - // API extension: clustering - Status string `json:"status" yaml:"status"` - - // Cluster members on which the storage pool has been defined - // Read only: true - // Example: ["lxd01", "lxd02", "lxd03"] - // - // API extension: clustering - Locations []string `json:"locations" yaml:"locations"` -} - -// StoragePoolPut represents the modifiable fields of a LXD storage pool. -// -// swagger:model -// -// API extension: storage. -type StoragePoolPut struct { - // Storage pool configuration map (refer to doc/storage.md) - // Example: {"volume.block.filesystem": "ext4", "volume.size": "50GiB"} - Config map[string]string `json:"config" yaml:"config"` - - // Description of the storage pool - // Example: Local SSD pool - // - // API extension: entity_description - Description string `json:"description" yaml:"description"` -} - -// Writable converts a full StoragePool struct into a StoragePoolPut struct -// (filters read-only fields). -func (storagePool *StoragePool) Writable() StoragePoolPut { - return storagePool.StoragePoolPut -} - -// StoragePoolState represents the state of a storage pool. -// -// swagger:model -// -// API extension: cluster_member_state. 
-type StoragePoolState struct { - ResourcesStoragePool `yaml:",inline"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/storage_pool_bucket.go b/vendor/github.com/lxc/lxd/shared/api/storage_pool_bucket.go deleted file mode 100644 index e857573a..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/storage_pool_bucket.go +++ /dev/null @@ -1,148 +0,0 @@ -package api - -// StorageBucketsPost represents the fields of a new LXD storage pool bucket -// -// swagger:model -// -// API extension: storage_buckets. -type StorageBucketsPost struct { - StorageBucketPut `yaml:",inline"` - - // Bucket name - // Example: foo - // - // API extension: storage_buckets - Name string `json:"name" yaml:"name"` -} - -// StorageBucketPut represents the modifiable fields of a LXD storage pool bucket -// -// swagger:model -// -// API extension: storage_buckets. -type StorageBucketPut struct { - // Storage bucket configuration map - // Example: {"size": "50GiB"} - // - // API extension: storage_buckets - Config map[string]string `json:"config" yaml:"config"` - - // Description of the storage bucket - // Example: My custom bucket - // - // API extension: storage_buckets - Description string `json:"description" yaml:"description"` -} - -// StorageBucket represents the fields of a LXD storage pool bucket -// -// swagger:model -// -// API extension: storage_buckets. -type StorageBucket struct { - StorageBucketPut `yaml:",inline"` - - // Bucket name - // Example: foo - // - // API extension: storage_buckets - Name string `json:"name" yaml:"name"` - - // Bucket S3 URL - // Example: https://127.0.0.1:8080/foo - // - // API extension: storage_buckets - S3URL string `json:"s3_url" yaml:"s3_url"` - - // What cluster member this record was found on - // Example: lxd01 - // - // API extension: storage_buckets - Location string `json:"location" yaml:"location"` -} - -// Etag returns the values used for etag generation. 
-func (b *StorageBucket) Etag() []any { - return []any{b.Name, b.Description, b.Config} -} - -// Writable converts a full StorageBucket struct into a StorageBucketPut struct (filters read-only fields). -func (b *StorageBucket) Writable() StorageBucketPut { - return b.StorageBucketPut -} - -// URL returns the URL for the bucket. -func (b *StorageBucket) URL(apiVersion string, poolName string, projectName string) *URL { - return NewURL().Path(apiVersion, "storage-pools", poolName, "buckets", b.Name).Project(projectName).Target(b.Location) -} - -// StorageBucketKeysPost represents the fields of a new LXD storage pool bucket key -// -// swagger:model -// -// API extension: storage_buckets. -type StorageBucketKeysPost struct { - StorageBucketKeyPut `yaml:",inline"` - - // Key name - // Example: my-read-only-key - // - // API extension: storage_buckets - Name string `json:"name" yaml:"name"` -} - -// StorageBucketKeyPut represents the modifiable fields of a LXD storage pool bucket key -// -// swagger:model -// -// API extension: storage_buckets. -type StorageBucketKeyPut struct { - // Description of the storage bucket key - // Example: My read-only bucket key - // - // API extension: storage_buckets - Description string `json:"description" yaml:"description"` - - // Whether the key can perform write actions or not. - // Example: read-only - // - // API extension: storage_buckets - Role string `json:"role" yaml:"role"` - - // Access key - // Example: 33UgkaIBLBIxb7O1 - // - // API extension: storage_buckets - AccessKey string `json:"access-key" yaml:"access-key"` - - // Secret key - // Example: kDQD6AOgwHgaQI1UIJBJpPaiLgZuJbq0 - // - // API extension: storage_buckets - SecretKey string `json:"secret-key" yaml:"secret-key"` -} - -// StorageBucketKey represents the fields of a LXD storage pool bucket key -// -// swagger:model -// -// API extension: storage_buckets. 
-type StorageBucketKey struct { - StorageBucketKeyPut `yaml:",inline"` - - // Key name - // Example: my-read-only-key - // - // API extension: storage_buckets - Name string `json:"name" yaml:"name"` -} - -// Etag returns the values used for etag generation. -func (b *StorageBucketKey) Etag() []any { - return []any{b.Name, b.Description, b.Role, b.AccessKey, b.SecretKey} -} - -// Writable converts a full StorageBucketKey struct into a StorageBucketKeyPut struct (filters read-only fields). -func (b *StorageBucketKey) Writable() StorageBucketKeyPut { - return b.StorageBucketKeyPut -} diff --git a/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume.go b/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume.go deleted file mode 100644 index 51a62547..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume.go +++ /dev/null @@ -1,239 +0,0 @@ -package api - -import ( - "time" -) - -// StorageVolumesPost represents the fields of a new LXD storage pool volume -// -// swagger:model -// -// API extension: storage. -type StorageVolumesPost struct { - StorageVolumePut `yaml:",inline"` - - // Volume name - // Example: foo - Name string `json:"name" yaml:"name"` - - // Volume type (container, custom, image or virtual-machine) - // Example: custom - Type string `json:"type" yaml:"type"` - - // Migration source - // - // API extension: storage_api_local_volume_handling - Source StorageVolumeSource `json:"source" yaml:"source"` - - // Volume content type (filesystem or block) - // Example: filesystem - // - // API extension: custom_block_volumes - ContentType string `json:"content_type" yaml:"content_type"` -} - -// StorageVolumePost represents the fields required to rename a LXD storage pool volume -// -// swagger:model -// -// API extension: storage_api_volume_rename. 
-type StorageVolumePost struct { - // New volume name - // Example: foo - Name string `json:"name" yaml:"name"` - - // New storage pool - // Example: remote - // - // API extension: storage_api_local_volume_handling - Pool string `json:"pool,omitempty" yaml:"pool,omitempty"` - - // Initiate volume migration - // Example: false - // - // API extension: storage_api_remote_volume_handling - Migration bool `json:"migration" yaml:"migration"` - - // Migration target (for push mode) - // - // API extension: storage_api_remote_volume_handling - Target *StorageVolumePostTarget `json:"target" yaml:"target"` - - // Whether snapshots should be discarded (migration only) - // Example: false - // - // API extension: storage_api_remote_volume_snapshots - VolumeOnly bool `json:"volume_only" yaml:"volume_only"` - - // New project name - // Example: foo - // - // API extension: storage_volume_project_move - Project string `json:"project,omitempty" yaml:"project,omitempty"` -} - -// StorageVolumePostTarget represents the migration target host and operation -// -// swagger:model -// -// API extension: storage_api_remote_volume_handling. -type StorageVolumePostTarget struct { - // The certificate of the migration target - // Example: X509 PEM certificate - Certificate string `json:"certificate" yaml:"certificate"` - - // Remote operation URL (for migration) - // Example: https://1.2.3.4:8443/1.0/operations/1721ae08-b6a8-416a-9614-3f89302466e1 - Operation string `json:"operation,omitempty" yaml:"operation,omitempty"` - - // Migration websockets credentials - // Example: {"migration": "random-string"} - Websockets map[string]string `json:"secrets,omitempty" yaml:"secrets,omitempty"` -} - -// StorageVolume represents the fields of a LXD storage volume. -// -// swagger:model -// -// API extension: storage. 
-type StorageVolume struct { - StorageVolumePut `yaml:",inline"` - - // Volume name - // Example: foo - Name string `json:"name" yaml:"name"` - - // Volume type - // Example: custom - Type string `json:"type" yaml:"type"` - - // List of URLs of objects using this storage volume - // Example: ["/1.0/instances/blah"] - UsedBy []string `json:"used_by" yaml:"used_by"` - - // What cluster member this record was found on - // Example: lxd01 - // - // API extension: clustering - Location string `json:"location" yaml:"location"` - - // Volume content type (filesystem or block) - // Example: filesystem - // - // API extension: custom_block_volumes - ContentType string `json:"content_type" yaml:"content_type"` - - // Project containing the volume. - // Example: default - // - // API extension: storage_volumes_all_projects - Project string `json:"project" yaml:"project"` - - // Volume creation timestamp - // Example: 2021-03-23T20:00:00-04:00 - // API extension: storage_volumes_created_at - CreatedAt time.Time `json:"created_at" yaml:"created_at"` -} - -// URL returns the URL for the volume. -func (v *StorageVolume) URL(apiVersion string, poolName string) *URL { - u := NewURL() - - volName, snapName, isSnap := GetParentAndSnapshotName(v.Name) - if isSnap { - u = u.Path(apiVersion, "storage-pools", poolName, "volumes", v.Type, volName, "snapshots", snapName) - } else { - u = u.Path(apiVersion, "storage-pools", poolName, "volumes", v.Type, volName) - } - - return u.Project(v.Project).Target(v.Location) -} - -// StorageVolumePut represents the modifiable fields of a LXD storage volume -// -// swagger:model -// -// API extension: storage. 
-type StorageVolumePut struct { - // Storage volume configuration map (refer to doc/storage.md) - // Example: {"zfs.remove_snapshots": "true", "size": "50GiB"} - Config map[string]string `json:"config" yaml:"config"` - - // Description of the storage volume - // Example: My custom volume - // - // API extension: entity_description - Description string `json:"description" yaml:"description"` - - // Name of a snapshot to restore - // Example: snap0 - // - // API extension: storage_api_volume_snapshots - Restore string `json:"restore,omitempty" yaml:"restore,omitempty"` -} - -// StorageVolumeSource represents the creation source for a new storage volume -// -// swagger:model -// -// API extension: storage_api_local_volume_handling. -type StorageVolumeSource struct { - // Source volume name (for copy) - // Example: foo - Name string `json:"name" yaml:"name"` - - // Source type (copy or migration) - // Example: copy - Type string `json:"type" yaml:"type"` - - // Source storage pool (for copy) - // Example: local - Pool string `json:"pool" yaml:"pool"` - - // Certificate (for migration) - // Example: X509 PEM certificate - // - // API extension: storage_api_remote_volume_handling - Certificate string `json:"certificate" yaml:"certificate"` - - // Whether to use pull or push mode (for migration) - // Example: pull - // - // API extension: storage_api_remote_volume_handling - Mode string `json:"mode,omitempty" yaml:"mode,omitempty"` - - // Remote operation URL (for migration) - // Example: https://1.2.3.4:8443/1.0/operations/1721ae08-b6a8-416a-9614-3f89302466e1 - // - // API extension: storage_api_remote_volume_handling - Operation string `json:"operation,omitempty" yaml:"operation,omitempty"` - - // Map of migration websockets (for migration) - // Example: {"rsync": "RANDOM-STRING"} - // - // API extension: storage_api_remote_volume_handling - Websockets map[string]string `json:"secrets,omitempty" yaml:"secrets,omitempty"` - - // Whether snapshots should be discarded (for 
migration) - // Example: false - // - // API extension: storage_api_volume_snapshots - VolumeOnly bool `json:"volume_only" yaml:"volume_only"` - - // Whether existing destination volume should be refreshed - // Example: false - // - // API extension: custom_volume_refresh - Refresh bool `json:"refresh" yaml:"refresh"` - - // Source project name - // Example: foo - // - // API extension: storage_api_project - Project string `json:"project,omitempty" yaml:"project,omitempty"` -} - -// Writable converts a full StorageVolume struct into a StorageVolumePut struct (filters read-only fields). -func (storageVolume *StorageVolume) Writable() StorageVolumePut { - return storageVolume.StorageVolumePut -} diff --git a/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_backup.go b/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_backup.go deleted file mode 100644 index b38e468e..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_backup.go +++ /dev/null @@ -1,70 +0,0 @@ -package api - -import ( - "time" -) - -// StoragePoolVolumeBackup represents a LXD volume backup -// -// swagger:model -// -// API extension: custom_volume_backup. 
-type StoragePoolVolumeBackup struct { - // Backup name - // Example: backup0 - Name string `json:"name" yaml:"name"` - - // When the backup was created - // Example: 2021-03-23T16:38:37.753398689-04:00 - CreatedAt time.Time `json:"created_at" yaml:"created_at"` - - // When the backup expires (gets auto-deleted) - // Example: 2021-03-23T17:38:37.753398689-04:00 - ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"` - - // Whether to ignore snapshots - // Example: false - VolumeOnly bool `json:"volume_only" yaml:"volume_only"` - - // Whether to use a pool-optimized binary format (instead of plain tarball) - // Example: true - OptimizedStorage bool `json:"optimized_storage" yaml:"optimized_storage"` -} - -// StoragePoolVolumeBackupsPost represents the fields available for a new LXD volume backup -// -// swagger:model -// -// API extension: custom_volume_backup. -type StoragePoolVolumeBackupsPost struct { - // Backup name - // Example: backup0 - Name string `json:"name" yaml:"name"` - - // When the backup expires (gets auto-deleted) - // Example: 2021-03-23T17:38:37.753398689-04:00 - ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"` - - // Whether to ignore snapshots - // Example: false - VolumeOnly bool `json:"volume_only" yaml:"volume_only"` - - // Whether to use a pool-optimized binary format (instead of plain tarball) - // Example: true - OptimizedStorage bool `json:"optimized_storage" yaml:"optimized_storage"` - - // What compression algorithm to use - // Example: gzip - CompressionAlgorithm string `json:"compression_algorithm" yaml:"compression_algorithm"` -} - -// StoragePoolVolumeBackupPost represents the fields available for the renaming of a volume backup -// -// swagger:model -// -// API extension: custom_volume_backup. 
-type StoragePoolVolumeBackupPost struct { - // New backup name - // Example: backup1 - Name string `json:"name" yaml:"name"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_snapshot.go b/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_snapshot.go deleted file mode 100644 index 32ff2c3a..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_snapshot.go +++ /dev/null @@ -1,83 +0,0 @@ -package api - -import ( - "time" -) - -// StorageVolumeSnapshotsPost represents the fields available for a new LXD storage volume snapshot -// -// swagger:model -// -// API extension: storage_api_volume_snapshots. -type StorageVolumeSnapshotsPost struct { - // Snapshot name - // Example: snap0 - Name string `json:"name" yaml:"name"` - - // When the snapshot expires (gets auto-deleted) - // Example: 2021-03-23T17:38:37.753398689-04:00 - // - // API extension: custom_volume_snapshot_expiry - ExpiresAt *time.Time `json:"expires_at" yaml:"expires_at"` -} - -// StorageVolumeSnapshotPost represents the fields required to rename/move a LXD storage volume snapshot -// -// swagger:model -// -// API extension: storage_api_volume_snapshots. -type StorageVolumeSnapshotPost struct { - // New snapshot name - // Example: snap1 - Name string `json:"name" yaml:"name"` -} - -// StorageVolumeSnapshot represents a LXD storage volume snapshot -// -// swagger:model -// -// API extension: storage_api_volume_snapshots. 
-type StorageVolumeSnapshot struct { - StorageVolumeSnapshotPut `json:",inline" yaml:",inline"` - - // Snapshot name - // Example: snap0 - Name string `json:"name" yaml:"name"` - - // Storage volume configuration map (refer to doc/storage.md) - // Example: {"zfs.remove_snapshots": "true", "size": "50GiB"} - Config map[string]string `json:"config" yaml:"config"` - - // The content type (filesystem or block) - // Example: filesystem - // - // API extension: custom_block_volumes - ContentType string `json:"content_type" yaml:"content_type"` - - // Volume snapshot creation timestamp - // Example: 2021-03-23T20:00:00-04:00 - // API extension: storage_volumes_created_at - CreatedAt time.Time `json:"created_at" yaml:"created_at"` -} - -// StorageVolumeSnapshotPut represents the modifiable fields of a LXD storage volume -// -// swagger:model -// -// API extension: storage_api_volume_snapshots. -type StorageVolumeSnapshotPut struct { - // Description of the storage volume - // Example: My custom volume - Description string `json:"description" yaml:"description"` - - // When the snapshot expires (gets auto-deleted) - // Example: 2021-03-23T17:38:37.753398689-04:00 - // - // API extension: custom_volume_snapshot_expiry - ExpiresAt *time.Time `json:"expires_at" yaml:"expires_at"` -} - -// Writable converts a full StorageVolumeSnapshot struct into a StorageVolumeSnapshotPut struct (filters read-only fields). 
-func (storageVolumeSnapshot *StorageVolumeSnapshot) Writable() StorageVolumeSnapshotPut { - return storageVolumeSnapshot.StorageVolumeSnapshotPut -} diff --git a/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_state.go b/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_state.go deleted file mode 100644 index 9720b337..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_state.go +++ /dev/null @@ -1,28 +0,0 @@ -package api - -// StorageVolumeState represents the live state of the volume -// -// swagger:model -// -// API extension: storage_volume_state. -type StorageVolumeState struct { - // Volume usage - Usage *StorageVolumeStateUsage `json:"usage" yaml:"usage"` -} - -// StorageVolumeStateUsage represents the disk usage of a volume -// -// swagger:model -// -// API extension: storage_volume_state. -type StorageVolumeStateUsage struct { - // Used space in bytes - // Example: 1693552640 - Used uint64 `json:"used,omitempty" yaml:"used,omitempty"` - - // Storage volume size in bytes - // Example: 5189222192 - // - // API extension: storage_volume_state_total - Total int64 `json:"total" yaml:"total"` -} diff --git a/vendor/github.com/lxc/lxd/shared/api/url.go b/vendor/github.com/lxc/lxd/shared/api/url.go deleted file mode 100644 index f28e272c..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/url.go +++ /dev/null @@ -1,76 +0,0 @@ -package api - -import ( - "net/url" - "strings" -) - -// URL represents an endpoint for the LXD API. -type URL struct { - url.URL -} - -// NewURL creates a new URL. -func NewURL() *URL { - return &URL{} -} - -// Scheme sets the scheme of the URL. -func (u *URL) Scheme(scheme string) *URL { - u.URL.Scheme = scheme - - return u -} - -// Host sets the host of the URL. -func (u *URL) Host(host string) *URL { - u.URL.Host = host - - return u -} - -// Path sets the path of the URL from one or more path parts. -// It appends each of the pathParts (escaped using url.PathEscape) prefixed with "/" to the URL path. 
-func (u *URL) Path(pathParts ...string) *URL { - var b strings.Builder - - for _, pathPart := range pathParts { - b.WriteString("/") // Build an absolute URL. - b.WriteString(url.PathEscape(pathPart)) - } - - u.URL.Path = b.String() - - return u -} - -// Project sets the "project" query parameter in the URL if the projectName is not empty or "default". -func (u *URL) Project(projectName string) *URL { - if projectName != "default" && projectName != "" { - queryArgs := u.Query() - queryArgs.Add("project", projectName) - u.RawQuery = queryArgs.Encode() - } - - return u -} - -// Target sets the "target" query parameter in the URL if the clusterMemberName is not empty or "default". -func (u *URL) Target(clusterMemberName string) *URL { - if clusterMemberName != "" && clusterMemberName != "none" { - queryArgs := u.Query() - queryArgs.Add("target", clusterMemberName) - u.RawQuery = queryArgs.Encode() - } - - return u -} - -// WithQuery adds a given query parameter with its value to the URL. -func (u *URL) WithQuery(key string, value string) *URL { - queryArgs := u.Query() - queryArgs.Add(key, value) - u.RawQuery = queryArgs.Encode() - - return u -} diff --git a/vendor/github.com/lxc/lxd/shared/api/warning.go b/vendor/github.com/lxc/lxd/shared/api/warning.go deleted file mode 100644 index aaa7d820..00000000 --- a/vendor/github.com/lxc/lxd/shared/api/warning.go +++ /dev/null @@ -1,65 +0,0 @@ -package api - -import ( - "time" -) - -// Warning represents a warning entry. -// -// swagger:model -// -// API extension: warnings. 
-type Warning struct { - WarningPut `yaml:",inline"` - - // UUID of the warning - // Example: e9e9da0d-2538-4351-8047-46d4a8ae4dbb - UUID string `json:"uuid" yaml:"uuid"` - - // What cluster member this warning occurred on - // Example: node1 - Location string `json:"location" yaml:"location"` - - // The project the warning occurred in - // Example: default - Project string `json:"project" yaml:"project"` - - // Type type of warning - // Example: Couldn't find CGroup - Type string `json:"type" yaml:"type"` - - // The number of times this warning occurred - // Example: 1 - Count int `json:"count" yaml:"count"` - - // The first time this warning occurred - // Example: 2021-03-23T17:38:37.753398689-04:00 - FirstSeenAt time.Time `json:"first_seen_at" yaml:"first_seen_at"` - - // The last time this warning occurred - // Example: 2021-03-23T17:38:37.753398689-04:00 - LastSeenAt time.Time `json:"last_seen_at" yaml:"last_seen_at"` - - // The warning message - // Example: Couldn't find the CGroup blkio.weight, disk priority will be ignored - LastMessage string `json:"last_message" yaml:"last_message"` - - // The severity of this warning - // Example: low - Severity string `json:"severity" yaml:"severity"` - - // The entity affected by this warning - // Example: /1.0/instances/c1?project=default - EntityURL string `json:"entity_url" yaml:"entity_url"` -} - -// WarningPut represents the modifiable fields of a warning. -// -// swagger:model -// -// API extension: warnings. -type WarningPut struct { - // Status of the warning (new, acknowledged, or resolved) - // Example: new - Status string `json:"status" yaml:"status"` -} diff --git a/vendor/github.com/lxc/lxd/shared/archive.go b/vendor/github.com/lxc/lxd/shared/archive.go deleted file mode 100644 index d9090d6f..00000000 --- a/vendor/github.com/lxc/lxd/shared/archive.go +++ /dev/null @@ -1,60 +0,0 @@ -package shared - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// DetectCompression detects compression from a file name. 
-func DetectCompression(fname string) ([]string, string, []string, error) { - f, err := os.Open(fname) - if err != nil { - return nil, "", nil, err - } - - defer func() { _ = f.Close() }() - - return DetectCompressionFile(f) -} - -// DetectCompressionFile detects the compression type of a file and returns the tar arguments needed -// to unpack the file, compression type (in the form of a file extension), and the command needed -// to decompress the file to an uncompressed tarball. -func DetectCompressionFile(f io.Reader) ([]string, string, []string, error) { - // read header parts to detect compression method - // bz2 - 2 bytes, 'BZ' signature/magic number - // gz - 2 bytes, 0x1f 0x8b - // lzma - 6 bytes, { [0x000, 0xE0], '7', 'z', 'X', 'Z', 0x00 } - - // xy - 6 bytes, header format { 0xFD, '7', 'z', 'X', 'Z', 0x00 } - // tar - 263 bytes, trying to get ustar from 257 - 262 - header := make([]byte, 263) - _, err := f.Read(header) - if err != nil { - return nil, "", nil, err - } - - switch { - case bytes.Equal(header[0:2], []byte{'B', 'Z'}): - return []string{"-jxf"}, ".tar.bz2", []string{"bzip2", "-d"}, nil - case bytes.Equal(header[0:2], []byte{0x1f, 0x8b}): - return []string{"-zxf"}, ".tar.gz", []string{"gzip", "-d"}, nil - case (bytes.Equal(header[1:5], []byte{'7', 'z', 'X', 'Z'}) && header[0] == 0xFD): - return []string{"-Jxf"}, ".tar.xz", []string{"xz", "-d"}, nil - case (bytes.Equal(header[1:5], []byte{'7', 'z', 'X', 'Z'}) && header[0] != 0xFD): - return []string{"--lzma", "-xf"}, ".tar.lzma", []string{"lzma", "-d"}, nil - case bytes.Equal(header[0:3], []byte{0x5d, 0x00, 0x00}): - return []string{"--lzma", "-xf"}, ".tar.lzma", []string{"lzma", "-d"}, nil - case bytes.Equal(header[257:262], []byte{'u', 's', 't', 'a', 'r'}): - return []string{"-xf"}, ".tar", []string{}, nil - case bytes.Equal(header[0:4], []byte{'h', 's', 'q', 's'}): - return []string{"-xf"}, ".squashfs", []string{"sqfs2tar", "--no-skip"}, nil - case bytes.Equal(header[0:3], []byte{'Q', 'F', 
'I'}): - return []string{""}, ".qcow2", []string{"qemu-img", "convert", "-O", "raw"}, nil - case bytes.Equal(header[0:4], []byte{0x28, 0xb5, 0x2f, 0xfd}): - return []string{"--zstd", "-xf"}, ".tar.zst", []string{"zstd", "-d"}, nil - default: - return nil, "", nil, fmt.Errorf("Unsupported compression") - } -} diff --git a/vendor/github.com/lxc/lxd/shared/cancel/canceller.go b/vendor/github.com/lxc/lxd/shared/cancel/canceller.go deleted file mode 100644 index 1f830e6d..00000000 --- a/vendor/github.com/lxc/lxd/shared/cancel/canceller.go +++ /dev/null @@ -1,21 +0,0 @@ -package cancel - -import ( - "context" -) - -// Canceller is a simple wrapper for a cancellable context which makes the associated context.CancelFunc more easily -// accessible. -type Canceller struct { - context.Context - Cancel context.CancelFunc -} - -// New returns a new canceller with the parent context. -func New(ctx context.Context) *Canceller { - ctx, cancel := context.WithCancel(ctx) - return &Canceller{ - Context: ctx, - Cancel: cancel, - } -} diff --git a/vendor/github.com/lxc/lxd/shared/cancel/http.go b/vendor/github.com/lxc/lxd/shared/cancel/http.go deleted file mode 100644 index c682d856..00000000 --- a/vendor/github.com/lxc/lxd/shared/cancel/http.go +++ /dev/null @@ -1,81 +0,0 @@ -package cancel - -import ( - "context" - "fmt" - "net/http" - "sync" -) - -// HTTPRequestCanceller tracks a cancelable operation. -type HTTPRequestCanceller struct { - reqCancel map[*http.Request]context.CancelFunc - lock sync.Mutex -} - -// NewHTTPRequestCanceller returns a new HTTPRequestCanceller struct. -func NewHTTPRequestCanceller() *HTTPRequestCanceller { - c := HTTPRequestCanceller{} - - c.lock.Lock() - c.reqCancel = make(map[*http.Request]context.CancelFunc) - c.lock.Unlock() - - return &c -} - -// Cancelable indicates whether there are operations that support cancellation. 
-func (c *HTTPRequestCanceller) Cancelable() bool { - c.lock.Lock() - length := len(c.reqCancel) - c.lock.Unlock() - - return length > 0 -} - -// Cancel will attempt to cancel all ongoing operations. -func (c *HTTPRequestCanceller) Cancel() error { - if !c.Cancelable() { - return fmt.Errorf("This operation can't be canceled at this time") - } - - c.lock.Lock() - for req, cancel := range c.reqCancel { - cancel() - delete(c.reqCancel, req) - } - - c.lock.Unlock() - - return nil -} - -// CancelableDownload performs an http request and allows for it to be canceled at any time. -func CancelableDownload(c *HTTPRequestCanceller, client *http.Client, req *http.Request) (*http.Response, chan bool, error) { - chDone := make(chan bool) - ctx, cancel := context.WithCancel(req.Context()) - req = req.WithContext(ctx) - if c != nil { - c.lock.Lock() - c.reqCancel[req] = cancel - c.lock.Unlock() - } - - go func() { - <-chDone - if c != nil { - c.lock.Lock() - cancel() - delete(c.reqCancel, req) - c.lock.Unlock() - } - }() - - resp, err := client.Do(req) - if err != nil { - close(chDone) - return nil, nil, err - } - - return resp, chDone, nil -} diff --git a/vendor/github.com/lxc/lxd/shared/cert.go b/vendor/github.com/lxc/lxd/shared/cert.go deleted file mode 100644 index 0804c663..00000000 --- a/vendor/github.com/lxc/lxd/shared/cert.go +++ /dev/null @@ -1,574 +0,0 @@ -// http://golang.org/src/pkg/crypto/tls/generate_cert.go -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package shared - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/base64" - "encoding/json" - "encoding/pem" - "fmt" - "math/big" - "net" - "net/http" - "os" - "os/user" - "path/filepath" - "time" - - "github.com/lxc/lxd/shared/api" -) - -// KeyPairAndCA returns a CertInfo object with a reference to the key pair and -// (optionally) CA certificate located in the given directory and having the -// given name prefix -// -// The naming conversion for the various files is: -// -// .crt -> public key -// .key -> private key -// .ca -> CA certificate -// -// If no public/private key files are found, a new key pair will be generated -// and saved on disk. -// -// If a CA certificate is found, it will be returned as well as second return -// value (otherwise it will be nil). -func KeyPairAndCA(dir, prefix string, kind CertKind, addHosts bool) (*CertInfo, error) { - certFilename := filepath.Join(dir, prefix+".crt") - keyFilename := filepath.Join(dir, prefix+".key") - - // Ensure that the certificate exists, or create a new one if it does - // not. - err := FindOrGenCert(certFilename, keyFilename, kind == CertClient, addHosts) - if err != nil { - return nil, err - } - - // Load the certificate. - keypair, err := tls.LoadX509KeyPair(certFilename, keyFilename) - if err != nil { - return nil, err - } - - // If available, load the CA data as well. 
- caFilename := filepath.Join(dir, prefix+".ca") - var ca *x509.Certificate - if PathExists(caFilename) { - ca, err = ReadCert(caFilename) - if err != nil { - return nil, err - } - } - - crlFilename := filepath.Join(dir, "ca.crl") - var crl *pkix.CertificateList - if PathExists(crlFilename) { - data, err := os.ReadFile(crlFilename) - if err != nil { - return nil, err - } - - crl, err = x509.ParseCRL(data) - if err != nil { - return nil, err - } - } - - info := &CertInfo{ - keypair: keypair, - ca: ca, - crl: crl, - } - - return info, nil -} - -// KeyPairFromRaw returns a CertInfo from the raw certificate and key. -func KeyPairFromRaw(certificate []byte, key []byte) (*CertInfo, error) { - keypair, err := tls.X509KeyPair(certificate, key) - if err != nil { - return nil, err - } - - return &CertInfo{ - keypair: keypair, - }, nil -} - -// CertInfo captures TLS certificate information about a certain public/private -// keypair and an optional CA certificate and CRL. -// -// Given LXD's support for PKI setups, these two bits of information are -// normally used and passed around together, so this structure helps with that -// (see doc/security.md for more details). -type CertInfo struct { - keypair tls.Certificate - ca *x509.Certificate - crl *pkix.CertificateList -} - -// KeyPair returns the public/private key pair. -func (c *CertInfo) KeyPair() tls.Certificate { - return c.keypair -} - -// CA returns the CA certificate. -func (c *CertInfo) CA() *x509.Certificate { - return c.ca -} - -// PublicKey is a convenience to encode the underlying public key to ASCII. -func (c *CertInfo) PublicKey() []byte { - data := c.KeyPair().Certificate[0] - return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: data}) -} - -// PublicKeyX509 is a convenience to return the underlying public key as an *x509.Certificate. 
-func (c *CertInfo) PublicKeyX509() (*x509.Certificate, error) { - return x509.ParseCertificate(c.KeyPair().Certificate[0]) -} - -// PrivateKey is a convenience to encode the underlying private key. -func (c *CertInfo) PrivateKey() []byte { - ecKey, ok := c.KeyPair().PrivateKey.(*ecdsa.PrivateKey) - if ok { - data, err := x509.MarshalECPrivateKey(ecKey) - if err != nil { - return nil - } - - return pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data}) - } - - rsaKey, ok := c.KeyPair().PrivateKey.(*rsa.PrivateKey) - if ok { - data := x509.MarshalPKCS1PrivateKey(rsaKey) - return pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: data}) - } - - return nil -} - -// Fingerprint returns the fingerprint of the public key. -func (c *CertInfo) Fingerprint() string { - fingerprint, err := CertFingerprintStr(string(c.PublicKey())) - // Parsing should never fail, since we generated the cert ourselves, - // but let's check the error for good measure. - if err != nil { - panic("invalid public key material") - } - - return fingerprint -} - -// CRL returns the certificate revocation list. -func (c *CertInfo) CRL() *pkix.CertificateList { - return c.crl -} - -// CertKind defines the kind of certificate to generate from scratch in -// KeyPairAndCA when it's not there. -// -// The two possible kinds are client and server, and they differ in the -// ext-key-usage bitmaps. See GenerateMemCert for more details. -type CertKind int - -// Possible kinds of certificates. -const ( - CertClient CertKind = iota - CertServer -) - -// TestingKeyPair returns CertInfo object initialized with a test keypair. It's -// meant to be used only by tests. 
-func TestingKeyPair() *CertInfo { - keypair, err := tls.X509KeyPair(testCertPEMBlock, testKeyPEMBlock) - if err != nil { - panic(fmt.Sprintf("invalid X509 keypair material: %v", err)) - } - - cert := &CertInfo{ - keypair: keypair, - } - - return cert -} - -// TestingAltKeyPair returns CertInfo object initialized with a test keypair -// which differs from the one returned by TestCertInfo. It's meant to be used -// only by tests. -func TestingAltKeyPair() *CertInfo { - keypair, err := tls.X509KeyPair(testAltCertPEMBlock, testAltKeyPEMBlock) - if err != nil { - panic(fmt.Sprintf("invalid X509 keypair material: %v", err)) - } - - cert := &CertInfo{ - keypair: keypair, - } - - return cert -} - -/* - * Generate a list of names for which the certificate will be valid. - * This will include the hostname and ip address. - */ -func mynames() ([]string, error) { - h, err := os.Hostname() - if err != nil { - return nil, err - } - - ret := []string{h, "127.0.0.1/8", "::1/128"} - return ret, nil -} - -// FindOrGenCert generates a keypair if needed. -// The type argument is false for server, true for client. -func FindOrGenCert(certf string, keyf string, certtype bool, addHosts bool) error { - if PathExists(certf) && PathExists(keyf) { - return nil - } - - /* If neither stat succeeded, then this is our first run and we - * need to generate cert and privkey */ - err := GenCert(certf, keyf, certtype, addHosts) - if err != nil { - return err - } - - return nil -} - -// GenCert will create and populate a certificate file and a key file. 
-func GenCert(certf string, keyf string, certtype bool, addHosts bool) error { - /* Create the basenames if needed */ - dir := filepath.Dir(certf) - err := os.MkdirAll(dir, 0750) - if err != nil { - return err - } - - dir = filepath.Dir(keyf) - err = os.MkdirAll(dir, 0750) - if err != nil { - return err - } - - certBytes, keyBytes, err := GenerateMemCert(certtype, addHosts) - if err != nil { - return err - } - - certOut, err := os.Create(certf) - if err != nil { - return fmt.Errorf("Failed to open %s for writing: %w", certf, err) - } - - _, err = certOut.Write(certBytes) - if err != nil { - return fmt.Errorf("Failed to write cert file: %w", err) - } - - err = certOut.Close() - if err != nil { - return fmt.Errorf("Failed to close cert file: %w", err) - } - - keyOut, err := os.OpenFile(keyf, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return fmt.Errorf("Failed to open %s for writing: %w", keyf, err) - } - - _, err = keyOut.Write(keyBytes) - if err != nil { - return fmt.Errorf("Failed to write key file: %w", err) - } - - err = keyOut.Close() - if err != nil { - return fmt.Errorf("Failed to close key file: %w", err) - } - - return nil -} - -// GenerateMemCert creates client or server certificate and key pair, -// returning them as byte arrays in memory. 
-func GenerateMemCert(client bool, addHosts bool) ([]byte, []byte, error) { - privk, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - if err != nil { - return nil, nil, fmt.Errorf("Failed to generate key: %w", err) - } - - validFrom := time.Now() - validTo := validFrom.Add(10 * 365 * 24 * time.Hour) - - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return nil, nil, fmt.Errorf("Failed to generate serial number: %w", err) - } - - userEntry, err := user.Current() - var username string - if err == nil { - username = userEntry.Username - if username == "" { - username = "UNKNOWN" - } - } else { - username = "UNKNOWN" - } - - hostname, err := os.Hostname() - if err != nil { - hostname = "UNKNOWN" - } - - template := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - Organization: []string{"linuxcontainers.org"}, - CommonName: fmt.Sprintf("%s@%s", username, hostname), - }, - NotBefore: validFrom, - NotAfter: validTo, - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - BasicConstraintsValid: true, - } - - if client { - template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} - } else { - template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} - } - - if addHosts { - hosts, err := mynames() - if err != nil { - return nil, nil, fmt.Errorf("Failed to get my hostname: %w", err) - } - - for _, h := range hosts { - ip, _, err := net.ParseCIDR(h) - if err == nil { - if !ip.IsLinkLocalUnicast() && !ip.IsLinkLocalMulticast() { - template.IPAddresses = append(template.IPAddresses, ip) - } - } else { - template.DNSNames = append(template.DNSNames, h) - } - } - } else if !client { - template.DNSNames = []string{"unspecified"} - } - - derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privk.PublicKey, privk) - if err != nil { - return nil, nil, fmt.Errorf("Failed to create certificate: 
%w", err) - } - - data, err := x509.MarshalECPrivateKey(privk) - if err != nil { - return nil, nil, err - } - - cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - key := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data}) - - return cert, key, nil -} - -func ReadCert(fpath string) (*x509.Certificate, error) { - cf, err := os.ReadFile(fpath) - if err != nil { - return nil, err - } - - certBlock, _ := pem.Decode(cf) - if certBlock == nil { - return nil, fmt.Errorf("Invalid certificate file") - } - - return x509.ParseCertificate(certBlock.Bytes) -} - -func CertFingerprint(cert *x509.Certificate) string { - return fmt.Sprintf("%x", sha256.Sum256(cert.Raw)) -} - -func CertFingerprintStr(c string) (string, error) { - pemCertificate, _ := pem.Decode([]byte(c)) - if pemCertificate == nil { - return "", fmt.Errorf("invalid certificate") - } - - cert, err := x509.ParseCertificate(pemCertificate.Bytes) - if err != nil { - return "", err - } - - return CertFingerprint(cert), nil -} - -func GetRemoteCertificate(address string, useragent string) (*x509.Certificate, error) { - // Setup a permissive TLS config - tlsConfig, err := GetTLSConfig("", "", "", nil) - if err != nil { - return nil, err - } - - tlsConfig.InsecureSkipVerify = true - - tr := &http.Transport{ - TLSClientConfig: tlsConfig, - DialContext: RFC3493Dialer, - Proxy: ProxyFromEnvironment, - ExpectContinueTimeout: time.Second * 30, - ResponseHeaderTimeout: time.Second * 3600, - TLSHandshakeTimeout: time.Second * 5, - } - - // Connect - req, err := http.NewRequest("GET", address, nil) - if err != nil { - return nil, err - } - - if useragent != "" { - req.Header.Set("User-Agent", useragent) - } - - client := &http.Client{Transport: tr} - resp, err := client.Do(req) - if err != nil { - return nil, err - } - - // Retrieve the certificate - if resp.TLS == nil || len(resp.TLS.PeerCertificates) == 0 { - return nil, fmt.Errorf("Unable to read remote TLS certificate") - } - - return 
resp.TLS.PeerCertificates[0], nil -} - -// CertificateTokenDecode decodes a base64 and JSON encoded certificate add token. -func CertificateTokenDecode(input string) (*api.CertificateAddToken, error) { - joinTokenJSON, err := base64.StdEncoding.DecodeString(input) - if err != nil { - return nil, err - } - - var j api.CertificateAddToken - err = json.Unmarshal(joinTokenJSON, &j) - if err != nil { - return nil, err - } - - if j.ClientName == "" { - return nil, fmt.Errorf("No client name in certificate add token") - } - - if len(j.Addresses) < 1 { - return nil, fmt.Errorf("No server addresses in certificate add token") - } - - if j.Secret == "" { - return nil, fmt.Errorf("No secret in certificate add token") - } - - if j.Fingerprint == "" { - return nil, fmt.Errorf("No certificate fingerprint in certificate add token") - } - - return &j, nil -} - -// GenerateTrustCertificate converts the specified serverCert and serverName into an api.Certificate suitable for -// use as a trusted cluster server certificate. -func GenerateTrustCertificate(cert *CertInfo, name string) (*api.Certificate, error) { - block, _ := pem.Decode(cert.PublicKey()) - if block == nil { - return nil, fmt.Errorf("Failed to decode certificate") - } - - fingerprint, err := CertFingerprintStr(string(cert.PublicKey())) - if err != nil { - return nil, fmt.Errorf("Failed to calculate fingerprint: %w", err) - } - - certificate := base64.StdEncoding.EncodeToString(block.Bytes) - apiCert := api.Certificate{ - CertificatePut: api.CertificatePut{ - Certificate: certificate, - Name: name, - Type: api.CertificateTypeServer, // Server type for intra-member communication. 
- }, - Fingerprint: fingerprint, - } - - return &apiCert, nil -} - -var testCertPEMBlock = []byte(` ------BEGIN CERTIFICATE----- -MIIBzjCCAVSgAwIBAgIUJAEAVl1oOU+OQxj5aUrRdJDwuWEwCgYIKoZIzj0EAwMw -EzERMA8GA1UEAwwIYWx0LnRlc3QwHhcNMjIwNDEzMDQyMjA0WhcNMzIwNDEwMDQy -MjA0WjATMREwDwYDVQQDDAhhbHQudGVzdDB2MBAGByqGSM49AgEGBSuBBAAiA2IA -BGAmiHj98SXz0ZW1AxheW+zkFyPz5ZrZoZDY7NezGQpoH4KZ1x08X1jw67wv+M0c -W+yd2BThOcvItBO+HokJ03lgL6cgDojcmEEfZntgmGHjG7USqh48TrQtmt/uSJsD -4qNpMGcwHQYDVR0OBBYEFPOsHk3ewn4abmyzLgOXs3Bg8Dq9MB8GA1UdIwQYMBaA -FPOsHk3ewn4abmyzLgOXs3Bg8Dq9MA8GA1UdEwEB/wQFMAMBAf8wFAYDVR0RBA0w -C4IJbG9jYWxob3N0MAoGCCqGSM49BAMDA2gAMGUCMCKR+gWwN9VWXct8tDxCvlA6 -+JP7iQPnLetiSLpyN4HEVQYP+EQhDJIJIy6+CwlUCQIxANQXfaTTrcVuhAb9dwVI -9bcu4cRGLEtbbNuOW/y+q7mXG0LtE/frDv/QrNpKhnnOzA== ------END CERTIFICATE----- -`) - -var testKeyPEMBlock = []byte(` ------BEGIN PRIVATE KEY----- -MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDBzlLjHjIxc5XHm95zB -p8cnUtHQcmdBy2Ekv+bbiaS/8M8Twp7Jvi47SruAY5gESK2hZANiAARgJoh4/fEl -89GVtQMYXlvs5Bcj8+Wa2aGQ2OzXsxkKaB+CmdcdPF9Y8Ou8L/jNHFvsndgU4TnL -yLQTvh6JCdN5YC+nIA6I3JhBH2Z7YJhh4xu1EqoePE60LZrf7kibA+I= ------END PRIVATE KEY----- -`) - -var testAltCertPEMBlock = []byte(` ------BEGIN CERTIFICATE----- -MIIBzjCCAVSgAwIBAgIUK41+7aTdYLu3x3vGoDOqat10TmQwCgYIKoZIzj0EAwMw -EzERMA8GA1UEAwwIYWx0LnRlc3QwHhcNMjIwNDEzMDQyMzM0WhcNMzIwNDEwMDQy -MzM0WjATMREwDwYDVQQDDAhhbHQudGVzdDB2MBAGByqGSM49AgEGBSuBBAAiA2IA -BAHv2a3obPHcQVDQouW/A/M/l2xHUFINWvCIhA5gWCtj9RLWKD6veBR133qSr9w0 -/DT96ZoTw7kJu/BQQFlRafmfMRTZcvXHLoPMoihBEkDqTGl2qwEQea/0MPi3thwJ -wqNpMGcwHQYDVR0OBBYEFKoF8yXx9lgBTQvZL2M8YqV4c4c5MB8GA1UdIwQYMBaA -FKoF8yXx9lgBTQvZL2M8YqV4c4c5MA8GA1UdEwEB/wQFMAMBAf8wFAYDVR0RBA0w -C4IJbG9jYWxob3N0MAoGCCqGSM49BAMDA2gAMGUCMQCcpYeYWmIL7QdUCGGRT8gt -YhQSciGzXlyncToAJ+A91dXGbGYvqfIti7R00sR+8cwCMAxglHP7iFzWrzn1M/Z9 -H5bVDjnWZvsgEblThausOYxWxzxD+5dT5rItoVZOJhfPLw== ------END CERTIFICATE----- -`) - -var testAltKeyPEMBlock = []byte(` ------BEGIN PRIVATE KEY----- 
-MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDC3/Fv+SmNLfBy2AuUD -O3zHq1GMLvVfk3JkDIqqbKPJeEa2rS44bemExc8v85wVYTmhZANiAAQB79mt6Gzx -3EFQ0KLlvwPzP5dsR1BSDVrwiIQOYFgrY/US1ig+r3gUdd96kq/cNPw0/emaE8O5 -CbvwUEBZUWn5nzEU2XL1xy6DzKIoQRJA6kxpdqsBEHmv9DD4t7YcCcI= ------END PRIVATE KEY----- -`) diff --git a/vendor/github.com/lxc/lxd/shared/cgo.go b/vendor/github.com/lxc/lxd/shared/cgo.go deleted file mode 100644 index 8329699a..00000000 --- a/vendor/github.com/lxc/lxd/shared/cgo.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build linux && cgo - -package shared - -// #cgo CFLAGS: -std=gnu11 -Wvla -Werror -fvisibility=hidden -Winit-self -// #cgo CFLAGS: -Wformat=2 -Wshadow -Wendif-labels -fasynchronous-unwind-tables -// #cgo CFLAGS: -pipe --param=ssp-buffer-size=4 -g -Wunused -// #cgo CFLAGS: -Werror=implicit-function-declaration -// #cgo CFLAGS: -Werror=return-type -Wendif-labels -Werror=overflow -// #cgo CFLAGS: -Wnested-externs -fexceptions -// #cgo LDFLAGS: -lutil -lpthread -import "C" diff --git a/vendor/github.com/lxc/lxd/shared/instance.go b/vendor/github.com/lxc/lxd/shared/instance.go deleted file mode 100644 index 7b2fa4cd..00000000 --- a/vendor/github.com/lxc/lxd/shared/instance.go +++ /dev/null @@ -1,397 +0,0 @@ -package shared - -import ( - "errors" - "fmt" - "strconv" - "strings" - "time" - - "github.com/lxc/lxd/lxd/instance/instancetype" - "github.com/lxc/lxd/shared/units" - "github.com/lxc/lxd/shared/validate" -) - -// InstanceAction indicates the type of action being performed. -type InstanceAction string - -// InstanceAction types. -const ( - Stop InstanceAction = "stop" - Start InstanceAction = "start" - Restart InstanceAction = "restart" - Freeze InstanceAction = "freeze" - Unfreeze InstanceAction = "unfreeze" -) - -// ConfigVolatilePrefix indicates the prefix used for volatile config keys. -const ConfigVolatilePrefix = "volatile." - -// IsRootDiskDevice returns true if the given device representation is configured as root disk for -// an instance. 
It typically get passed a specific entry of api.Instance.Devices. -func IsRootDiskDevice(device map[string]string) bool { - // Root disk devices also need a non-empty "pool" property, but we can't check that here - // because this function is used with clients talking to older servers where there was no - // concept of a storage pool, and also it is used for migrating from old to new servers. - // The validation of the non-empty "pool" property is done inside the disk device itself. - if device["type"] == "disk" && device["path"] == "/" && device["source"] == "" { - return true - } - - return false -} - -// ErrNoRootDisk means there is no root disk device found. -var ErrNoRootDisk = fmt.Errorf("No root device could be found") - -// GetRootDiskDevice returns the instance device that is configured as root disk. -// Returns the device name and device config map. -func GetRootDiskDevice(devices map[string]map[string]string) (string, map[string]string, error) { - var devName string - var dev map[string]string - - for n, d := range devices { - if IsRootDiskDevice(d) { - if devName != "" { - return "", nil, fmt.Errorf("More than one root device found") - } - - devName = n - dev = d - } - } - - if devName != "" { - return devName, dev, nil - } - - return "", nil, ErrNoRootDisk -} - -// HugePageSizeKeys is a list of known hugepage size configuration keys. -var HugePageSizeKeys = [...]string{"limits.hugepages.64KB", "limits.hugepages.1MB", "limits.hugepages.2MB", "limits.hugepages.1GB"} - -// HugePageSizeSuffix contains the list of known hugepage size suffixes. -var HugePageSizeSuffix = [...]string{"64KB", "1MB", "2MB", "1GB"} - -// InstanceConfigKeysAny is a map of config key to validator. (keys applying to containers AND virtual machines). 
-var InstanceConfigKeysAny = map[string]func(value string) error{ - "boot.autostart": validate.Optional(validate.IsBool), - "boot.autostart.delay": validate.Optional(validate.IsInt64), - "boot.autostart.priority": validate.Optional(validate.IsInt64), - "boot.stop.priority": validate.Optional(validate.IsInt64), - "boot.host_shutdown_timeout": validate.Optional(validate.IsInt64), - - "cloud-init.network-config": validate.Optional(validate.IsYAML), - "cloud-init.user-data": validate.Optional(validate.IsCloudInitUserData), - "cloud-init.vendor-data": validate.Optional(validate.IsCloudInitUserData), - - "cluster.evacuate": validate.Optional(validate.IsOneOf("auto", "migrate", "live-migrate", "stop")), - - "limits.cpu": validate.Optional(validate.IsValidCPUSet), - "limits.disk.priority": validate.Optional(validate.IsPriority), - "limits.memory": func(value string) error { - if value == "" { - return nil - } - - if strings.HasSuffix(value, "%") { - num, err := strconv.ParseInt(strings.TrimSuffix(value, "%"), 10, 64) - if err != nil { - return err - } - - if num == 0 { - return errors.New("Memory limit can't be 0%") - } - - return nil - } - - num, err := units.ParseByteSizeString(value) - if err != nil { - return err - } - - if num == 0 { - return fmt.Errorf("Memory limit can't be 0") - } - - return nil - }, - "limits.network.priority": validate.Optional(validate.IsPriority), - - // Caller is responsible for full validation of any raw.* value. 
- "raw.apparmor": validate.IsAny, - - "security.devlxd": validate.Optional(validate.IsBool), - "security.protection.delete": validate.Optional(validate.IsBool), - - "snapshots.schedule": validate.Optional(validate.IsCron([]string{"@hourly", "@daily", "@midnight", "@weekly", "@monthly", "@annually", "@yearly", "@startup", "@never"})), - "snapshots.schedule.stopped": validate.Optional(validate.IsBool), - "snapshots.pattern": validate.IsAny, - "snapshots.expiry": func(value string) error { - // Validate expression - _, err := GetExpiry(time.Time{}, value) - return err - }, - - // Volatile keys. - "volatile.apply_template": validate.IsAny, - "volatile.base_image": validate.IsAny, - "volatile.cloud-init.instance-id": validate.Optional(validate.IsUUID), - "volatile.evacuate.origin": validate.IsAny, - "volatile.last_state.idmap": validate.IsAny, - "volatile.last_state.power": validate.IsAny, - "volatile.last_state.ready": validate.IsBool, - "volatile.idmap.base": validate.IsAny, - "volatile.idmap.current": validate.IsAny, - "volatile.idmap.next": validate.IsAny, - "volatile.apply_quota": validate.IsAny, - "volatile.uuid": validate.Optional(validate.IsUUID), - "volatile.vsock_id": validate.Optional(validate.IsInt64), - "volatile.uuid.generation": validate.Optional(validate.IsUUID), - - // Caller is responsible for full validation of any raw.* value. - "raw.idmap": validate.IsAny, -} - -// InstanceConfigKeysContainer is a map of config key to validator. (keys applying to containers only). 
-var InstanceConfigKeysContainer = map[string]func(value string) error{ - "limits.cpu.allowance": func(value string) error { - if value == "" { - return nil - } - - if strings.HasSuffix(value, "%") { - // Percentage based allocation - _, err := strconv.Atoi(strings.TrimSuffix(value, "%")) - if err != nil { - return err - } - - return nil - } - - // Time based allocation - fields := strings.SplitN(value, "/", 2) - if len(fields) != 2 { - return fmt.Errorf("Invalid allowance: %s", value) - } - - _, err := strconv.Atoi(strings.TrimSuffix(fields[0], "ms")) - if err != nil { - return err - } - - _, err = strconv.Atoi(strings.TrimSuffix(fields[1], "ms")) - if err != nil { - return err - } - - return nil - }, - "limits.cpu.priority": validate.Optional(validate.IsPriority), - "limits.hugepages.64KB": validate.Optional(validate.IsSize), - "limits.hugepages.1MB": validate.Optional(validate.IsSize), - "limits.hugepages.2MB": validate.Optional(validate.IsSize), - "limits.hugepages.1GB": validate.Optional(validate.IsSize), - "limits.memory.enforce": validate.Optional(validate.IsOneOf("soft", "hard")), - - "limits.memory.swap": validate.Optional(validate.IsBool), - "limits.memory.swap.priority": validate.Optional(validate.IsPriority), - "limits.processes": validate.Optional(validate.IsInt64), - - "linux.kernel_modules": validate.IsAny, - - "migration.incremental.memory": validate.Optional(validate.IsBool), - "migration.incremental.memory.iterations": validate.Optional(validate.IsUint32), - "migration.incremental.memory.goal": validate.Optional(validate.IsUint32), - - "nvidia.runtime": validate.Optional(validate.IsBool), - "nvidia.driver.capabilities": validate.IsAny, - "nvidia.require.cuda": validate.IsAny, - "nvidia.require.driver": validate.IsAny, - - // Caller is responsible for full validation of any raw.* value. 
- "raw.lxc": validate.IsAny, - "raw.seccomp": validate.IsAny, - - "security.devlxd.images": validate.Optional(validate.IsBool), - - "security.idmap.base": validate.Optional(validate.IsUint32), - "security.idmap.isolated": validate.Optional(validate.IsBool), - "security.idmap.size": validate.Optional(validate.IsUint32), - - "security.nesting": validate.Optional(validate.IsBool), - "security.privileged": validate.Optional(validate.IsBool), - "security.protection.shift": validate.Optional(validate.IsBool), - - "security.syscalls.allow": validate.IsAny, - "security.syscalls.blacklist_default": validate.Optional(validate.IsBool), - "security.syscalls.blacklist_compat": validate.Optional(validate.IsBool), - "security.syscalls.blacklist": validate.IsAny, - "security.syscalls.deny_default": validate.Optional(validate.IsBool), - "security.syscalls.deny_compat": validate.Optional(validate.IsBool), - "security.syscalls.deny": validate.IsAny, - "security.syscalls.intercept.bpf": validate.Optional(validate.IsBool), - "security.syscalls.intercept.bpf.devices": validate.Optional(validate.IsBool), - "security.syscalls.intercept.mknod": validate.Optional(validate.IsBool), - "security.syscalls.intercept.mount": validate.Optional(validate.IsBool), - "security.syscalls.intercept.mount.allowed": validate.IsAny, - "security.syscalls.intercept.mount.fuse": validate.IsAny, - "security.syscalls.intercept.mount.shift": validate.Optional(validate.IsBool), - "security.syscalls.intercept.sched_setscheduler": validate.Optional(validate.IsBool), - "security.syscalls.intercept.setxattr": validate.Optional(validate.IsBool), - "security.syscalls.intercept.sysinfo": validate.Optional(validate.IsBool), - "security.syscalls.whitelist": validate.IsAny, -} - -// InstanceConfigKeysVM is a map of config key to validator. (keys applying to VM only). 
-var InstanceConfigKeysVM = map[string]func(value string) error{ - "limits.memory.hugepages": validate.Optional(validate.IsBool), - - "migration.stateful": validate.Optional(validate.IsBool), - - // Caller is responsible for full validation of any raw.* value. - "raw.qemu": validate.IsAny, - "raw.qemu.conf": validate.IsAny, - - "security.agent.metrics": validate.Optional(validate.IsBool), - "security.secureboot": validate.Optional(validate.IsBool), - "security.sev": validate.Optional(validate.IsBool), - "security.sev.policy.es": validate.Optional(validate.IsBool), - "security.sev.session.dh": validate.Optional(validate.IsAny), - "security.sev.session.data": validate.Optional(validate.IsAny), - - "agent.nic_config": validate.Optional(validate.IsBool), - - "volatile.apply_nvram": validate.Optional(validate.IsBool), -} - -// ConfigKeyChecker returns a function that will check whether or not -// a provide value is valid for the associate config key. Returns an -// error if the key is not known. The checker function only performs -// syntactic checking of the value, semantic and usage checking must -// be done by the caller. User defined keys are always considered to -// be valid, e.g. user.* and environment.* keys. 
-func ConfigKeyChecker(key string, instanceType instancetype.Type) (func(value string) error, error) { - f, ok := InstanceConfigKeysAny[key] - if ok { - return f, nil - } - - if instanceType == instancetype.Any || instanceType == instancetype.Container { - f, ok := InstanceConfigKeysContainer[key] - if ok { - return f, nil - } - } - - if instanceType == instancetype.Any || instanceType == instancetype.VM { - f, ok := InstanceConfigKeysVM[key] - if ok { - return f, nil - } - } - - if strings.HasPrefix(key, ConfigVolatilePrefix) { - if strings.HasSuffix(key, ".hwaddr") { - return validate.IsAny, nil - } - - if strings.HasSuffix(key, ".name") { - return validate.IsAny, nil - } - - if strings.HasSuffix(key, ".host_name") { - return validate.IsAny, nil - } - - if strings.HasSuffix(key, ".mtu") { - return validate.IsAny, nil - } - - if strings.HasSuffix(key, ".created") { - return validate.IsAny, nil - } - - if strings.HasSuffix(key, ".id") { - return validate.IsAny, nil - } - - if strings.HasSuffix(key, ".vlan") { - return validate.IsAny, nil - } - - if strings.HasSuffix(key, ".spoofcheck") { - return validate.IsAny, nil - } - - if strings.HasSuffix(key, ".last_state.vf.parent") { - return validate.IsAny, nil - } - - if strings.HasSuffix(key, ".apply_quota") { - return validate.IsAny, nil - } - - if strings.HasSuffix(key, ".ceph_rbd") { - return validate.IsAny, nil - } - - if strings.HasSuffix(key, ".driver") { - return validate.IsAny, nil - } - - if strings.HasSuffix(key, ".uuid") { - return validate.IsAny, nil - } - - if strings.HasSuffix(key, ".last_state.ready") { - return validate.IsBool, nil - } - } - - if strings.HasPrefix(key, "environment.") { - return validate.IsAny, nil - } - - if strings.HasPrefix(key, "user.") { - return validate.IsAny, nil - } - - if strings.HasPrefix(key, "image.") { - return validate.IsAny, nil - } - - if strings.HasPrefix(key, "limits.kernel.") && - (len(key) > len("limits.kernel.")) { - return validate.IsAny, nil - } - - if 
(instanceType == instancetype.Any || instanceType == instancetype.Container) && - strings.HasPrefix(key, "linux.sysctl.") { - return validate.IsAny, nil - } - - return nil, fmt.Errorf("Unknown configuration key: %s", key) -} - -// InstanceIncludeWhenCopying is used to decide whether to include a config item or not when copying an instance. -// The remoteCopy argument indicates if the copy is remote (i.e between LXD nodes) as this affects the keys kept. -func InstanceIncludeWhenCopying(configKey string, remoteCopy bool) bool { - if configKey == "volatile.base_image" { - return true // Include volatile.base_image always as it can help optimize copies. - } - - if configKey == "volatile.last_state.idmap" && !remoteCopy { - return true // Include volatile.last_state.idmap when doing local copy to avoid needless remapping. - } - - if strings.HasPrefix(configKey, ConfigVolatilePrefix) { - return false // Exclude all other volatile keys. - } - - return true // Keep all other keys. -} diff --git a/vendor/github.com/lxc/lxd/shared/ioprogress/data.go b/vendor/github.com/lxc/lxd/shared/ioprogress/data.go deleted file mode 100644 index 3a86dddf..00000000 --- a/vendor/github.com/lxc/lxd/shared/ioprogress/data.go +++ /dev/null @@ -1,16 +0,0 @@ -package ioprogress - -// The ProgressData struct represents new progress information on an operation. 
-type ProgressData struct { - // Preferred string repreentation of progress (always set) - Text string - - // Progress in percent - Percentage int - - // Number of bytes transferred (for files) - TransferredBytes int64 - - // Total number of bytes (for files) - TotalBytes int64 -} diff --git a/vendor/github.com/lxc/lxd/shared/ioprogress/reader.go b/vendor/github.com/lxc/lxd/shared/ioprogress/reader.go deleted file mode 100644 index a0436c78..00000000 --- a/vendor/github.com/lxc/lxd/shared/ioprogress/reader.go +++ /dev/null @@ -1,25 +0,0 @@ -package ioprogress - -import ( - "io" -) - -// ProgressReader is a wrapper around ReadCloser which allows for progress tracking. -type ProgressReader struct { - io.ReadCloser - Tracker *ProgressTracker -} - -// Read in ProgressReader is the same as io.Read. -func (pt *ProgressReader) Read(p []byte) (int, error) { - // Do normal reader tasks - n, err := pt.ReadCloser.Read(p) - - // Do the actual progress tracking - if pt.Tracker != nil { - pt.Tracker.total += int64(n) - pt.Tracker.update(n) - } - - return n, err -} diff --git a/vendor/github.com/lxc/lxd/shared/ioprogress/tracker.go b/vendor/github.com/lxc/lxd/shared/ioprogress/tracker.go deleted file mode 100644 index 83429215..00000000 --- a/vendor/github.com/lxc/lxd/shared/ioprogress/tracker.go +++ /dev/null @@ -1,77 +0,0 @@ -package ioprogress - -import ( - "time" -) - -// ProgressTracker provides the stream information needed for tracking. 
-type ProgressTracker struct { - Length int64 - Handler func(int64, int64) - - percentage float64 - total int64 - start *time.Time - last *time.Time -} - -func (pt *ProgressTracker) update(n int) { - // Skip the rest if no handler attached - if pt.Handler == nil { - return - } - - // Initialize start time if needed - if pt.start == nil { - cur := time.Now() - pt.start = &cur - pt.last = pt.start - } - - // Skip if no data to count - if n <= 0 { - return - } - - // Update interval handling - var percentage float64 - if pt.Length > 0 { - // If running in relative mode, check that we increased by at least 1% - percentage = float64(pt.total) / float64(pt.Length) * float64(100) - if percentage-pt.percentage < 0.9 { - return - } - } else { - // If running in absolute mode, check that at least a second elapsed - interval := time.Since(*pt.last).Seconds() - if interval < 1 { - return - } - } - - // Determine speed - speedInt := int64(0) - duration := time.Since(*pt.start).Seconds() - if duration > 0 { - speed := float64(pt.total) / duration - speedInt = int64(speed) - } - - // Determine progress - var progressInt int64 - if pt.Length > 0 { - pt.percentage = percentage - progressInt = int64(1 + int(percentage)) - if progressInt > 100 { - progressInt = 100 - } - } else { - progressInt = pt.total - - // Update timestamp - cur := time.Now() - pt.last = &cur - } - - pt.Handler(progressInt, speedInt) -} diff --git a/vendor/github.com/lxc/lxd/shared/ioprogress/writer.go b/vendor/github.com/lxc/lxd/shared/ioprogress/writer.go deleted file mode 100644 index 52a6f2a6..00000000 --- a/vendor/github.com/lxc/lxd/shared/ioprogress/writer.go +++ /dev/null @@ -1,25 +0,0 @@ -package ioprogress - -import ( - "io" -) - -// ProgressWriter is a wrapper around WriteCloser which allows for progress tracking. -type ProgressWriter struct { - io.WriteCloser - Tracker *ProgressTracker -} - -// Write in ProgressWriter is the same as io.Write. 
-func (pt *ProgressWriter) Write(p []byte) (int, error) { - // Do normal writer tasks - n, err := pt.WriteCloser.Write(p) - - // Do the actual progress tracking - if pt.Tracker != nil { - pt.Tracker.total += int64(n) - pt.Tracker.update(n) - } - - return n, err -} diff --git a/vendor/github.com/lxc/lxd/shared/json.go b/vendor/github.com/lxc/lxd/shared/json.go deleted file mode 100644 index 0d880d35..00000000 --- a/vendor/github.com/lxc/lxd/shared/json.go +++ /dev/null @@ -1,51 +0,0 @@ -package shared - -import ( - "fmt" -) - -type Jmap map[string]any - -func (m Jmap) GetString(key string) (string, error) { - val, ok := m[key] - if !ok { - return "", fmt.Errorf("Response was missing `%s`", key) - } else if val, ok := val.(string); !ok { - return "", fmt.Errorf("`%s` was not a string", key) - } else { - return val, nil - } -} - -func (m Jmap) GetMap(key string) (Jmap, error) { - val, ok := m[key] - if !ok { - return nil, fmt.Errorf("Response was missing `%s`", key) - } else if val, ok := val.(map[string]any); !ok { - return nil, fmt.Errorf("`%s` was not a map, got %T", key, m[key]) - } else { - return val, nil - } -} - -func (m Jmap) GetInt(key string) (int, error) { - val, ok := m[key] - if !ok { - return -1, fmt.Errorf("Response was missing `%s`", key) - } else if val, ok := val.(float64); !ok { - return -1, fmt.Errorf("`%s` was not an int", key) - } else { - return int(val), nil - } -} - -func (m Jmap) GetBool(key string) (bool, error) { - val, ok := m[key] - if !ok { - return false, fmt.Errorf("Response was missing `%s`", key) - } else if val, ok := val.(bool); !ok { - return false, fmt.Errorf("`%s` was not an int", key) - } else { - return val, nil - } -} diff --git a/vendor/github.com/lxc/lxd/shared/logger/format.go b/vendor/github.com/lxc/lxd/shared/logger/format.go deleted file mode 100644 index 3d5a3c79..00000000 --- a/vendor/github.com/lxc/lxd/shared/logger/format.go +++ /dev/null @@ -1,25 +0,0 @@ -package logger - -import ( - "encoding/json" - "fmt" - 
"runtime" -) - -// Pretty will attempt to convert any Go structure into a string suitable for logging. -func Pretty(input any) string { - pretty, err := json.MarshalIndent(input, "\t", "\t") - if err != nil { - return fmt.Sprintf("%v", input) - } - - return fmt.Sprintf("\n\t%s", pretty) -} - -// GetStack will convert the Go stack into a string suitable for logging. -func GetStack() string { - buf := make([]byte, 1<<16) - n := runtime.Stack(buf, true) - - return fmt.Sprintf("\n\t%s", buf[:n]) -} diff --git a/vendor/github.com/lxc/lxd/shared/logger/log.go b/vendor/github.com/lxc/lxd/shared/logger/log.go deleted file mode 100644 index c9d42875..00000000 --- a/vendor/github.com/lxc/lxd/shared/logger/log.go +++ /dev/null @@ -1,72 +0,0 @@ -package logger - -import ( - "io" - "os" - - "github.com/sirupsen/logrus" - lWriter "github.com/sirupsen/logrus/hooks/writer" - - "github.com/lxc/lxd/shared/termios" -) - -// Setup a basic empty logger on init. -func init() { - logger := logrus.New() - logger.SetOutput(io.Discard) - - Log = newWrapper(logger) -} - -// InitLogger intializes a full logging instance. -func InitLogger(filepath string, syslogName string, verbose bool, debug bool, hook logrus.Hook) error { - logger := logrus.New() - logger.Level = logrus.DebugLevel - logger.SetOutput(io.Discard) - - // Setup the formatter. - logger.Formatter = &logrus.TextFormatter{PadLevelText: true, FullTimestamp: true, ForceColors: termios.IsTerminal(int(os.Stderr.Fd()))} - - // Setup log level. - levels := []logrus.Level{logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel, logrus.WarnLevel} - if debug { - levels = append(levels, logrus.InfoLevel, logrus.DebugLevel) - } else if verbose { - levels = append(levels, logrus.InfoLevel) - } - - // Setup writers. 
- writers := []io.Writer{os.Stderr} - - if filepath != "" { - f, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600) - if err != nil { - return err - } - - writers = append(writers, f) - } - - logger.AddHook(&lWriter.Hook{ - Writer: io.MultiWriter(writers...), - LogLevels: levels, - }) - - // Setup syslog. - if syslogName != "" { - err := setupSyslog(logger, syslogName) - if err != nil { - return err - } - } - - // Add hooks. - if hook != nil { - logger.AddHook(hook) - } - - // Set the logger. - Log = newWrapper(logger) - - return nil -} diff --git a/vendor/github.com/lxc/lxd/shared/logger/syslog_linux.go b/vendor/github.com/lxc/lxd/shared/logger/syslog_linux.go deleted file mode 100644 index 293b44ba..00000000 --- a/vendor/github.com/lxc/lxd/shared/logger/syslog_linux.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build linux - -package logger - -import ( - "log/syslog" - - "github.com/sirupsen/logrus" - lSyslog "github.com/sirupsen/logrus/hooks/syslog" -) - -type syslogHandler struct { - handler logrus.Hook -} - -func (h syslogHandler) Fire(entry *logrus.Entry) error { - return h.handler.Fire(entry) -} - -func (h syslogHandler) Levels() []logrus.Level { - return []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - } -} - -func setupSyslog(logger *logrus.Logger, syslogName string) error { - syslogHook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, syslogName) - if err != nil { - return err - } - - logger.AddHook(syslogHandler{syslogHook}) - return nil -} diff --git a/vendor/github.com/lxc/lxd/shared/logger/syslog_other.go b/vendor/github.com/lxc/lxd/shared/logger/syslog_other.go deleted file mode 100644 index b9b97150..00000000 --- a/vendor/github.com/lxc/lxd/shared/logger/syslog_other.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !linux - -package logger - -import ( - "fmt" - - "github.com/sirupsen/logrus" -) - -func setupSyslog(logger *logrus.Logger, syslogName string) error 
{ - return fmt.Errorf("Syslog logging isn't supported on this platform") -} diff --git a/vendor/github.com/lxc/lxd/shared/logger/toplevel.go b/vendor/github.com/lxc/lxd/shared/logger/toplevel.go deleted file mode 100644 index 2bd072a5..00000000 --- a/vendor/github.com/lxc/lxd/shared/logger/toplevel.go +++ /dev/null @@ -1,70 +0,0 @@ -package logger - -import ( - "fmt" -) - -// Trace logs a message (with optional context) at the TRACE log level. -func Trace(msg string, ctx ...Ctx) { - Log.Trace(msg, ctx...) -} - -// Debug logs a message (with optional context) at the DEBUG log level. -func Debug(msg string, ctx ...Ctx) { - Log.Debug(msg, ctx...) -} - -// Info logs a message (with optional context) at the INFO log level. -func Info(msg string, ctx ...Ctx) { - Log.Info(msg, ctx...) -} - -// Warn logs a message (with optional context) at the WARNING log level. -func Warn(msg string, ctx ...Ctx) { - Log.Warn(msg, ctx...) -} - -// Error logs a message (with optional context) at the ERROR log level. -func Error(msg string, ctx ...Ctx) { - Log.Error(msg, ctx...) -} - -// Panic logs a message (with optional context) at the PANIC log level. -func Panic(msg string, ctx ...Ctx) { - Log.Panic(msg, ctx...) -} - -// Tracef logs at the TRACE log level using a standard printf format string. -func Tracef(format string, args ...any) { - Log.Trace(fmt.Sprintf(format, args...)) -} - -// Debugf logs at the DEBUG log level using a standard printf format string. -func Debugf(format string, args ...any) { - Log.Debug(fmt.Sprintf(format, args...)) -} - -// Infof logs at the INFO log level using a standard printf format string. -func Infof(format string, args ...any) { - Log.Info(fmt.Sprintf(format, args...)) -} - -// Warnf logs at the WARNING log level using a standard printf format string. -func Warnf(format string, args ...any) { - Log.Warn(fmt.Sprintf(format, args...)) -} - -// Errorf logs at the ERROR log level using a standard printf format string. 
-func Errorf(format string, args ...any) { - Log.Error(fmt.Sprintf(format, args...)) -} - -// Panicf logs at the PANIC log level using a standard printf format string. -func Panicf(format string, args ...any) { - Log.Panic(fmt.Sprintf(format, args...)) -} - -// AddContext returns a new logger with the context added. -func AddContext(logger Logger, ctx Ctx) Logger { - return Log.AddContext(ctx) -} diff --git a/vendor/github.com/lxc/lxd/shared/logger/types.go b/vendor/github.com/lxc/lxd/shared/logger/types.go deleted file mode 100644 index a9409c02..00000000 --- a/vendor/github.com/lxc/lxd/shared/logger/types.go +++ /dev/null @@ -1,35 +0,0 @@ -package logger - -import ( - "github.com/sirupsen/logrus" -) - -// Ctx is the logging context. -type Ctx logrus.Fields - -// Log contains the logger used by all the logging functions. -var Log Logger - -// Logger is the main logging interface. -type Logger interface { - Panic(msg string, args ...Ctx) - Fatal(msg string, args ...Ctx) - Error(msg string, args ...Ctx) - Warn(msg string, args ...Ctx) - Info(msg string, args ...Ctx) - Debug(msg string, args ...Ctx) - Trace(msg string, args ...Ctx) - AddContext(Ctx) Logger -} - -// targetLogger represents the subset of logrus.Logger and logrus.Entry that we care about. -type targetLogger interface { - Panic(args ...interface{}) - Fatal(args ...interface{}) - Error(args ...interface{}) - Warn(args ...interface{}) - Info(args ...interface{}) - Debug(args ...interface{}) - Trace(args ...interface{}) - WithFields(fields logrus.Fields) *logrus.Entry -} diff --git a/vendor/github.com/lxc/lxd/shared/logger/wrapper.go b/vendor/github.com/lxc/lxd/shared/logger/wrapper.go deleted file mode 100644 index c41970eb..00000000 --- a/vendor/github.com/lxc/lxd/shared/logger/wrapper.go +++ /dev/null @@ -1,55 +0,0 @@ -package logger - -import ( - "github.com/sirupsen/logrus" -) - -// ctxLogger returns a logger target with all provided ctx applied. 
-func (lw *logWrapper) ctxLogger(ctx ...Ctx) targetLogger { - logger := lw.target - for _, c := range ctx { - logger = logger.WithFields(logrus.Fields(c)) - } - - return logger -} - -func newWrapper(target targetLogger) Logger { - return &logWrapper{target} -} - -type logWrapper struct { - target targetLogger -} - -func (lw *logWrapper) Panic(msg string, ctx ...Ctx) { - lw.ctxLogger(ctx...).Panic(msg) -} - -func (lw *logWrapper) Fatal(msg string, ctx ...Ctx) { - lw.ctxLogger(ctx...).Fatal(msg) -} - -func (lw *logWrapper) Error(msg string, ctx ...Ctx) { - lw.ctxLogger(ctx...).Error(msg) -} - -func (lw *logWrapper) Warn(msg string, ctx ...Ctx) { - lw.ctxLogger(ctx...).Warn(msg) -} - -func (lw *logWrapper) Info(msg string, ctx ...Ctx) { - lw.ctxLogger(ctx...).Info(msg) -} - -func (lw *logWrapper) Debug(msg string, ctx ...Ctx) { - lw.ctxLogger(ctx...).Debug(msg) -} - -func (lw *logWrapper) Trace(msg string, ctx ...Ctx) { - lw.ctxLogger(ctx...).Trace(msg) -} - -func (lw *logWrapper) AddContext(ctx Ctx) Logger { - return &logWrapper{lw.ctxLogger(ctx)} -} diff --git a/vendor/github.com/lxc/lxd/shared/network.go b/vendor/github.com/lxc/lxd/shared/network.go deleted file mode 100644 index 1f584714..00000000 --- a/vendor/github.com/lxc/lxd/shared/network.go +++ /dev/null @@ -1,517 +0,0 @@ -package shared - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/pem" - "fmt" - "io" - "net" - "net/http" - "os" - "strings" - "sync" - "time" - - "github.com/gorilla/websocket" - - "github.com/lxc/lxd/shared/logger" -) - -// connectErrorPrefix used as prefix to error returned from RFC3493Dialer. -const connectErrorPrefix = "Unable to connect to" - -// RFC3493Dialer connects to the specified server and returns the connection. -// If the connection cannot be established then an error with the connectErrorPrefix is returned. 
-func RFC3493Dialer(context context.Context, network string, address string) (net.Conn, error) { - host, port, err := net.SplitHostPort(address) - if err != nil { - return nil, err - } - - addrs, err := net.LookupHost(host) - if err != nil { - return nil, err - } - - var errs []error - for _, a := range addrs { - c, err := net.DialTimeout(network, net.JoinHostPort(a, port), 10*time.Second) - if err != nil { - errs = append(errs, err) - continue - } - - tc, ok := c.(*net.TCPConn) - if ok { - _ = tc.SetKeepAlive(true) - _ = tc.SetKeepAlivePeriod(3 * time.Second) - } - - return c, nil - } - - return nil, fmt.Errorf("%s: %s (%v)", connectErrorPrefix, address, errs) -} - -// IsConnectionError returns true if the given error is due to the dialer not being able to connect to the target -// LXD server. -func IsConnectionError(err error) bool { - // FIXME: unfortunately the LXD client currently does not provide a way to differentiate between errors. - return strings.Contains(err.Error(), connectErrorPrefix) -} - -// InitTLSConfig returns a tls.Config populated with default encryption -// parameters. This is used as baseline config for both client and server -// certificates used by LXD. -func InitTLSConfig() *tls.Config { - config := &tls.Config{} - - // Restrict to TLS 1.3 unless LXD_INSECURE_TLS is set. 
- if IsFalseOrEmpty(os.Getenv("LXD_INSECURE_TLS")) { - config.MinVersion = tls.VersionTLS13 - } else { - config.MinVersion = tls.VersionTLS12 - } - - return config -} - -func finalizeTLSConfig(tlsConfig *tls.Config, tlsRemoteCert *x509.Certificate) { - // Setup RootCA - if tlsConfig.RootCAs == nil { - tlsConfig.RootCAs, _ = systemCertPool() - } - - // Trusted certificates - if tlsRemoteCert != nil { - if tlsConfig.RootCAs == nil { - tlsConfig.RootCAs = x509.NewCertPool() - } - - // Make it a valid RootCA - tlsRemoteCert.IsCA = true - tlsRemoteCert.KeyUsage = x509.KeyUsageCertSign - - // Setup the pool - tlsConfig.RootCAs.AddCert(tlsRemoteCert) - - // Set the ServerName - if tlsRemoteCert.DNSNames != nil { - tlsConfig.ServerName = tlsRemoteCert.DNSNames[0] - } - } - - tlsConfig.BuildNameToCertificate() -} - -func GetTLSConfig(tlsClientCertFile string, tlsClientKeyFile string, tlsClientCAFile string, tlsRemoteCert *x509.Certificate) (*tls.Config, error) { - tlsConfig := InitTLSConfig() - - // Client authentication - if tlsClientCertFile != "" && tlsClientKeyFile != "" { - cert, err := tls.LoadX509KeyPair(tlsClientCertFile, tlsClientKeyFile) - if err != nil { - return nil, err - } - - tlsConfig.Certificates = []tls.Certificate{cert} - } - - if tlsClientCAFile != "" { - caCertificates, err := os.ReadFile(tlsClientCAFile) - if err != nil { - return nil, err - } - - caPool := x509.NewCertPool() - caPool.AppendCertsFromPEM(caCertificates) - - tlsConfig.RootCAs = caPool - } - - finalizeTLSConfig(tlsConfig, tlsRemoteCert) - return tlsConfig, nil -} - -func GetTLSConfigMem(tlsClientCert string, tlsClientKey string, tlsClientCA string, tlsRemoteCertPEM string, insecureSkipVerify bool) (*tls.Config, error) { - tlsConfig := InitTLSConfig() - tlsConfig.InsecureSkipVerify = insecureSkipVerify - // Client authentication - if tlsClientCert != "" && tlsClientKey != "" { - cert, err := tls.X509KeyPair([]byte(tlsClientCert), []byte(tlsClientKey)) - if err != nil { - return nil, err - 
} - - tlsConfig.Certificates = []tls.Certificate{cert} - } - - var tlsRemoteCert *x509.Certificate - if tlsRemoteCertPEM != "" { - // Ignore any content outside of the PEM bytes we care about - certBlock, _ := pem.Decode([]byte(tlsRemoteCertPEM)) - if certBlock == nil { - return nil, fmt.Errorf("Invalid remote certificate") - } - - var err error - tlsRemoteCert, err = x509.ParseCertificate(certBlock.Bytes) - if err != nil { - return nil, err - } - } - - if tlsClientCA != "" { - caPool := x509.NewCertPool() - caPool.AppendCertsFromPEM([]byte(tlsClientCA)) - - tlsConfig.RootCAs = caPool - } - - finalizeTLSConfig(tlsConfig, tlsRemoteCert) - - return tlsConfig, nil -} - -func IsLoopback(iface *net.Interface) bool { - return int(iface.Flags&net.FlagLoopback) > 0 -} - -func WebsocketSendStream(conn *websocket.Conn, r io.Reader, bufferSize int) chan bool { - ch := make(chan bool) - - if r == nil { - close(ch) - return ch - } - - go func(conn *websocket.Conn, r io.Reader) { - in := ReaderToChannel(r, bufferSize) - for { - buf, ok := <-in - if !ok { - break - } - - err := conn.WriteMessage(websocket.BinaryMessage, buf) - if err != nil { - logger.Debug("Got err writing", logger.Ctx{"err": err}) - break - } - } - _ = conn.WriteMessage(websocket.TextMessage, []byte{}) - ch <- true - }(conn, r) - - return ch -} - -func WebsocketRecvStream(w io.Writer, conn *websocket.Conn) chan bool { - ch := make(chan bool) - - go func(w io.Writer, conn *websocket.Conn) { - for { - mt, r, err := conn.NextReader() - if mt == websocket.CloseMessage { - logger.Debug("WebsocketRecvStream got close message for reader") - break - } - - if mt == websocket.TextMessage { - logger.Debug("WebsocketRecvStream got message barrier") - break - } - - if err != nil { - logger.Debug("WebsocketRecvStream got error getting next reader", logger.Ctx{"err": err}) - break - } - - buf, err := io.ReadAll(r) - if err != nil { - logger.Debug("WebsocketRecvStream got error writing to writer", logger.Ctx{"err": err}) - 
break - } - - if w == nil { - continue - } - - i, err := w.Write(buf) - if i != len(buf) { - logger.Debug("WebsocketRecvStream didn't write all of buf") - break - } - - if err != nil { - logger.Debug("WebsocketRecvStream error writing buf", logger.Ctx{"err": err}) - break - } - } - ch <- true - }(w, conn) - - return ch -} - -func WebsocketProxy(source *websocket.Conn, target *websocket.Conn) chan struct{} { - // Forwarder between two websockets, closes channel upon disconnection. - forward := func(in *websocket.Conn, out *websocket.Conn, ch chan struct{}) { - for { - mt, r, err := in.NextReader() - if err != nil { - break - } - - w, err := out.NextWriter(mt) - if err != nil { - break - } - - _, err = io.Copy(w, r) - _ = w.Close() - if err != nil { - break - } - } - - close(ch) - } - - // Spawn forwarders in both directions. - chSend := make(chan struct{}) - go forward(source, target, chSend) - - chRecv := make(chan struct{}) - go forward(target, source, chRecv) - - // Close main channel and disconnect upon completion of either forwarder. - ch := make(chan struct{}) - go func() { - select { - case <-chSend: - case <-chRecv: - } - - _ = source.Close() - _ = target.Close() - - close(ch) - }() - - return ch -} - -func defaultReader(conn *websocket.Conn, r io.ReadCloser, readDone chan<- bool) { - /* For now, we don't need to adjust buffer sizes in - * WebsocketMirror, since it's used for interactive things like - * exec. 
- */ - in := ReaderToChannel(r, -1) - for { - buf, ok := <-in - if !ok { - _ = r.Close() - logger.Debug("Sending write barrier") - _ = conn.WriteMessage(websocket.TextMessage, []byte{}) - readDone <- true - return - } - - err := conn.WriteMessage(websocket.BinaryMessage, buf) - if err != nil { - logger.Debug("Got err writing", logger.Ctx{"err": err}) - break - } - } - closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") - _ = conn.WriteMessage(websocket.CloseMessage, closeMsg) - readDone <- true - _ = r.Close() -} - -func DefaultWriter(conn *websocket.Conn, w io.WriteCloser, writeDone chan<- bool) { - for { - mt, r, err := conn.NextReader() - if err != nil { - logger.Debug("DefaultWriter got error getting next reader", logger.Ctx{"err": err}) - break - } - - if mt == websocket.CloseMessage { - logger.Debug("DefaultWriter got close message for reader") - break - } - - if mt == websocket.TextMessage { - logger.Debug("DefaultWriter got message barrier, resetting stream") - break - } - - buf, err := io.ReadAll(r) - if err != nil { - logger.Debug("DefaultWriter got error writing to writer", logger.Ctx{"err": err}) - break - } - - i, err := w.Write(buf) - if i != len(buf) { - logger.Debug("DefaultWriter didn't write all of buf") - break - } - - if err != nil { - logger.Debug("DefaultWriter error writing buf", logger.Ctx{"err": err}) - break - } - } - writeDone <- true - _ = w.Close() -} - -// WebsocketIO is a wrapper implementing ReadWriteCloser on top of websocket. -type WebsocketIO struct { - Conn *websocket.Conn - reader io.Reader - mur sync.Mutex - muw sync.Mutex -} - -func (w *WebsocketIO) Read(p []byte) (n int, err error) { - w.mur.Lock() - defer w.mur.Unlock() - - // Get new message if no active one. 
- if w.reader == nil { - var mt int - - mt, w.reader, err = w.Conn.NextReader() - if err != nil { - return 0, err - } - - if mt == websocket.CloseMessage || mt == websocket.TextMessage { - w.reader = nil // At the end of the message, reset reader. - - return 0, io.EOF - } - } - - // Perform the read itself. - n, err = w.reader.Read(p) - if err != nil { - w.reader = nil // At the end of the message, reset reader. - - if err == io.EOF { - return n, nil // Don't return EOF error at end of message. - } - - return n, err - } - - return n, nil -} - -func (w *WebsocketIO) Write(p []byte) (int, error) { - w.muw.Lock() - defer w.muw.Unlock() - - err := w.Conn.WriteMessage(websocket.BinaryMessage, p) - if err != nil { - return -1, err - } - - return len(p), nil -} - -// Close sends a control message indicating the stream is finished, but it does not actually close the socket. -func (w *WebsocketIO) Close() error { - w.muw.Lock() - defer w.muw.Unlock() - // Target expects to get a control message indicating stream is finished. - return w.Conn.WriteMessage(websocket.TextMessage, []byte{}) -} - -// WebsocketMirror allows mirroring a reader to a websocket and taking the -// result and writing it to a writer. This function allows for multiple -// mirrorings and correctly negotiates stream endings. However, it means any -// websocket.Conns passed to it are live when it returns, and must be closed -// explicitly. 
-type WebSocketMirrorReader func(conn *websocket.Conn, r io.ReadCloser, readDone chan<- bool) -type WebSocketMirrorWriter func(conn *websocket.Conn, w io.WriteCloser, writeDone chan<- bool) - -func WebsocketMirror(conn *websocket.Conn, w io.WriteCloser, r io.ReadCloser, Reader WebSocketMirrorReader, Writer WebSocketMirrorWriter) (chan bool, chan bool) { - readDone := make(chan bool, 1) - writeDone := make(chan bool, 1) - - ReadFunc := Reader - if ReadFunc == nil { - ReadFunc = defaultReader - } - - WriteFunc := Writer - if WriteFunc == nil { - WriteFunc = DefaultWriter - } - - go ReadFunc(conn, r, readDone) - go WriteFunc(conn, w, writeDone) - - return readDone, writeDone -} - -func WebsocketConsoleMirror(conn *websocket.Conn, w io.WriteCloser, r io.ReadCloser) (chan bool, chan bool) { - readDone := make(chan bool, 1) - writeDone := make(chan bool, 1) - - go DefaultWriter(conn, w, writeDone) - - go func(conn *websocket.Conn, r io.ReadCloser) { - in := ReaderToChannel(r, -1) - for { - buf, ok := <-in - if !ok { - _ = r.Close() - logger.Debugf("Sending write barrier") - _ = conn.WriteMessage(websocket.BinaryMessage, []byte("\r")) - _ = conn.WriteMessage(websocket.TextMessage, []byte{}) - readDone <- true - return - } - - err := conn.WriteMessage(websocket.BinaryMessage, buf) - if err != nil { - logger.Debugf("Got err writing %s", err) - break - } - } - - closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") - _ = conn.WriteMessage(websocket.CloseMessage, closeMsg) - readDone <- true - _ = r.Close() - }(conn, r) - - return readDone, writeDone -} - -var WebsocketUpgrader = websocket.Upgrader{ - CheckOrigin: func(r *http.Request) bool { return true }, -} - -// AllocatePort asks the kernel for a free open port that is ready to use. 
-func AllocatePort() (int, error) { - addr, err := net.ResolveTCPAddr("tcp", "localhost:0") - if err != nil { - return -1, err - } - - l, err := net.ListenTCP("tcp", addr) - if err != nil { - return -1, err - } - - return l.Addr().(*net.TCPAddr).Port, l.Close() -} diff --git a/vendor/github.com/lxc/lxd/shared/network_ip.go b/vendor/github.com/lxc/lxd/shared/network_ip.go deleted file mode 100644 index bb3dc033..00000000 --- a/vendor/github.com/lxc/lxd/shared/network_ip.go +++ /dev/null @@ -1,32 +0,0 @@ -package shared - -import ( - "bytes" - "fmt" - "net" -) - -// IPRange defines a range of IP addresses. -// Optionally just set Start to indicate a single IP. -type IPRange struct { - Start net.IP - End net.IP -} - -// ContainsIP tests whether a supplied IP falls within the IPRange. -func (r *IPRange) ContainsIP(ip net.IP) bool { - if r.End == nil { - // the range is only a single IP - return r.Start.Equal(ip) - } - - return bytes.Compare(ip, r.Start) >= 0 && bytes.Compare(ip, r.End) <= 0 -} - -func (r *IPRange) String() string { - if r.End == nil { - return r.Start.String() - } - - return fmt.Sprintf("%v-%v", r.Start, r.End) -} diff --git a/vendor/github.com/lxc/lxd/shared/network_unix.go b/vendor/github.com/lxc/lxd/shared/network_unix.go deleted file mode 100644 index eb486c16..00000000 --- a/vendor/github.com/lxc/lxd/shared/network_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build !windows - -package shared - -import ( - "crypto/x509" - "os" -) - -func systemCertPool() (*x509.CertPool, error) { - // Get the system pool - pool, err := x509.SystemCertPool() - if err != nil { - return nil, err - } - - // Attempt to load the system's pool too (for snaps) - if PathExists("/var/lib/snapd/hostfs/etc/ssl/certs/ca-certificates.crt") { - snapCerts, err := os.ReadFile("/var/lib/snapd/hostfs/etc/ssl/certs/ca-certificates.crt") - if err == nil { - pool.AppendCertsFromPEM(snapCerts) - } - } - - return pool, nil -} diff --git 
a/vendor/github.com/lxc/lxd/shared/network_windows.go b/vendor/github.com/lxc/lxd/shared/network_windows.go deleted file mode 100644 index 89aabcd3..00000000 --- a/vendor/github.com/lxc/lxd/shared/network_windows.go +++ /dev/null @@ -1,66 +0,0 @@ -//go:build windows - -package shared - -import ( - "crypto/x509" - "fmt" - "sync" - "unsafe" - - "golang.org/x/sys/windows" -) - -var once sync.Once -var systemRoots *x509.CertPool - -func systemCertPool() (*x509.CertPool, error) { - once.Do(initSystemRoots) - if systemRoots == nil { - return nil, fmt.Errorf("Bad system root pool") - } - - return systemRoots, nil -} - -func initSystemRoots() { - const CRYPT_E_NOT_FOUND = 0x80092004 - - store, err := windows.CertOpenSystemStore(0, windows.StringToUTF16Ptr("ROOT")) - if err != nil { - systemRoots = nil - return - } - - defer windows.CertCloseStore(store, 0) - - roots := x509.NewCertPool() - var cert *windows.CertContext - for { - cert, err = windows.CertEnumCertificatesInStore(store, cert) - if err != nil { - errno, ok := err.(windows.Errno) - if ok { - if errno == CRYPT_E_NOT_FOUND { - break - } - } - - systemRoots = nil - return - } - - if cert == nil { - break - } - // Copy the buf, since ParseCertificate does not create its own copy. 
- buf := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:] - buf2 := make([]byte, cert.Length) - copy(buf2, buf) - c, err := x509.ParseCertificate(buf2) - if err == nil { - roots.AddCert(c) - } - } - systemRoots = roots -} diff --git a/vendor/github.com/lxc/lxd/shared/osarch/architectures.go b/vendor/github.com/lxc/lxd/shared/osarch/architectures.go deleted file mode 100644 index 799c2864..00000000 --- a/vendor/github.com/lxc/lxd/shared/osarch/architectures.go +++ /dev/null @@ -1,162 +0,0 @@ -package osarch - -import ( - "fmt" -) - -const ( - ARCH_UNKNOWN = 0 - ARCH_32BIT_INTEL_X86 = 1 - ARCH_64BIT_INTEL_X86 = 2 - ARCH_32BIT_ARMV7_LITTLE_ENDIAN = 3 - ARCH_64BIT_ARMV8_LITTLE_ENDIAN = 4 - ARCH_32BIT_POWERPC_BIG_ENDIAN = 5 - ARCH_64BIT_POWERPC_BIG_ENDIAN = 6 - ARCH_64BIT_POWERPC_LITTLE_ENDIAN = 7 - ARCH_64BIT_S390_BIG_ENDIAN = 8 - ARCH_32BIT_MIPS = 9 - ARCH_64BIT_MIPS = 10 - ARCH_32BIT_RISCV_LITTLE_ENDIAN = 11 - ARCH_64BIT_RISCV_LITTLE_ENDIAN = 12 - ARCH_32BIT_ARMV6_LITTLE_ENDIAN = 13 - ARCH_32BIT_ARMV8_LITTLE_ENDIAN = 14 -) - -var architectureNames = map[int]string{ - ARCH_32BIT_INTEL_X86: "i686", - ARCH_64BIT_INTEL_X86: "x86_64", - ARCH_32BIT_ARMV6_LITTLE_ENDIAN: "armv6l", - ARCH_32BIT_ARMV7_LITTLE_ENDIAN: "armv7l", - ARCH_32BIT_ARMV8_LITTLE_ENDIAN: "armv8l", - ARCH_64BIT_ARMV8_LITTLE_ENDIAN: "aarch64", - ARCH_32BIT_POWERPC_BIG_ENDIAN: "ppc", - ARCH_64BIT_POWERPC_BIG_ENDIAN: "ppc64", - ARCH_64BIT_POWERPC_LITTLE_ENDIAN: "ppc64le", - ARCH_64BIT_S390_BIG_ENDIAN: "s390x", - ARCH_32BIT_MIPS: "mips", - ARCH_64BIT_MIPS: "mips64", - ARCH_32BIT_RISCV_LITTLE_ENDIAN: "riscv32", - ARCH_64BIT_RISCV_LITTLE_ENDIAN: "riscv64", -} - -var architectureAliases = map[int][]string{ - ARCH_32BIT_INTEL_X86: {"i386", "i586", "386", "x86", "generic_32"}, - ARCH_64BIT_INTEL_X86: {"amd64", "generic_64"}, - ARCH_32BIT_ARMV6_LITTLE_ENDIAN: {"armel", "arm"}, - ARCH_32BIT_ARMV7_LITTLE_ENDIAN: {"armhf", "armhfp", "armv7a_hardfp", "armv7", "armv7a_vfpv3_hardfp"}, - 
ARCH_32BIT_ARMV8_LITTLE_ENDIAN: {}, - ARCH_64BIT_ARMV8_LITTLE_ENDIAN: {"arm64", "arm64_generic"}, - ARCH_32BIT_POWERPC_BIG_ENDIAN: {"powerpc"}, - ARCH_64BIT_POWERPC_BIG_ENDIAN: {"powerpc64", "ppc64"}, - ARCH_64BIT_POWERPC_LITTLE_ENDIAN: {"ppc64el"}, - ARCH_32BIT_MIPS: {"mipsel", "mipsle"}, - ARCH_64BIT_MIPS: {"mips64el", "mips64le"}, - ARCH_32BIT_RISCV_LITTLE_ENDIAN: {}, - ARCH_64BIT_RISCV_LITTLE_ENDIAN: {}, -} - -var architecturePersonalities = map[int]string{ - ARCH_32BIT_INTEL_X86: "linux32", - ARCH_64BIT_INTEL_X86: "linux64", - ARCH_32BIT_ARMV6_LITTLE_ENDIAN: "linux32", - ARCH_32BIT_ARMV7_LITTLE_ENDIAN: "linux32", - ARCH_32BIT_ARMV8_LITTLE_ENDIAN: "linux32", - ARCH_64BIT_ARMV8_LITTLE_ENDIAN: "linux64", - ARCH_32BIT_POWERPC_BIG_ENDIAN: "linux32", - ARCH_64BIT_POWERPC_BIG_ENDIAN: "linux64", - ARCH_64BIT_POWERPC_LITTLE_ENDIAN: "linux64", - ARCH_64BIT_S390_BIG_ENDIAN: "linux64", - ARCH_32BIT_MIPS: "linux32", - ARCH_64BIT_MIPS: "linux64", - ARCH_32BIT_RISCV_LITTLE_ENDIAN: "linux32", - ARCH_64BIT_RISCV_LITTLE_ENDIAN: "linux64", -} - -var architectureSupportedPersonalities = map[int][]int{ - ARCH_32BIT_INTEL_X86: {}, - ARCH_64BIT_INTEL_X86: {ARCH_32BIT_INTEL_X86}, - ARCH_32BIT_ARMV6_LITTLE_ENDIAN: {}, - ARCH_32BIT_ARMV7_LITTLE_ENDIAN: {ARCH_32BIT_ARMV6_LITTLE_ENDIAN}, - ARCH_32BIT_ARMV8_LITTLE_ENDIAN: {ARCH_32BIT_ARMV6_LITTLE_ENDIAN, ARCH_32BIT_ARMV7_LITTLE_ENDIAN}, - ARCH_64BIT_ARMV8_LITTLE_ENDIAN: {ARCH_32BIT_ARMV6_LITTLE_ENDIAN, ARCH_32BIT_ARMV7_LITTLE_ENDIAN, ARCH_32BIT_ARMV8_LITTLE_ENDIAN}, - ARCH_32BIT_POWERPC_BIG_ENDIAN: {}, - ARCH_64BIT_POWERPC_BIG_ENDIAN: {ARCH_32BIT_POWERPC_BIG_ENDIAN}, - ARCH_64BIT_POWERPC_LITTLE_ENDIAN: {}, - ARCH_64BIT_S390_BIG_ENDIAN: {}, - ARCH_32BIT_MIPS: {}, - ARCH_64BIT_MIPS: {ARCH_32BIT_MIPS}, - ARCH_32BIT_RISCV_LITTLE_ENDIAN: {}, - ARCH_64BIT_RISCV_LITTLE_ENDIAN: {}, -} - -const ArchitectureDefault = "x86_64" - -func ArchitectureName(arch int) (string, error) { - arch_name, exists := architectureNames[arch] - if exists { - return 
arch_name, nil - } - - return "unknown", fmt.Errorf("Architecture isn't supported: %d", arch) -} - -func ArchitectureId(arch string) (int, error) { - for arch_id, arch_name := range architectureNames { - if arch_name == arch { - return arch_id, nil - } - } - - for arch_id, arch_aliases := range architectureAliases { - for _, arch_name := range arch_aliases { - if arch_name == arch { - return arch_id, nil - } - } - } - - return ARCH_UNKNOWN, fmt.Errorf("Architecture isn't supported: %s", arch) -} - -func ArchitecturePersonality(arch int) (string, error) { - arch_personality, exists := architecturePersonalities[arch] - if exists { - return arch_personality, nil - } - - return "", fmt.Errorf("Architecture isn't supported: %d", arch) -} - -func ArchitecturePersonalities(arch int) ([]int, error) { - personalities, exists := architectureSupportedPersonalities[arch] - if exists { - return personalities, nil - } - - return []int{}, fmt.Errorf("Architecture isn't supported: %d", arch) -} - -// ArchitectureGetLocalID returns the local hardware architecture ID. -func ArchitectureGetLocalID() (int, error) { - name, err := ArchitectureGetLocal() - if err != nil { - return -1, err - } - - id, err := ArchitectureId(name) - if err != nil { - return -1, err - } - - return id, nil -} - -// SupportedArchitectures returns the list of all supported architectures. -func SupportedArchitectures() []string { - result := []string{} - for _, archName := range architectureNames { - result = append(result, archName) - } - - return result -} diff --git a/vendor/github.com/lxc/lxd/shared/osarch/architectures_linux.go b/vendor/github.com/lxc/lxd/shared/osarch/architectures_linux.go deleted file mode 100644 index 6b3ac33e..00000000 --- a/vendor/github.com/lxc/lxd/shared/osarch/architectures_linux.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build linux - -package osarch - -import ( - "bytes" - - "golang.org/x/sys/unix" -) - -// ArchitectureGetLocal returns the local hardware architecture. 
-func ArchitectureGetLocal() (string, error) { - uname := unix.Utsname{} - err := unix.Uname(&uname) - if err != nil { - return ArchitectureDefault, err - } - - return string(uname.Machine[:bytes.IndexByte(uname.Machine[:], 0)]), nil -} diff --git a/vendor/github.com/lxc/lxd/shared/osarch/architectures_others.go b/vendor/github.com/lxc/lxd/shared/osarch/architectures_others.go deleted file mode 100644 index 38cd4eca..00000000 --- a/vendor/github.com/lxc/lxd/shared/osarch/architectures_others.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !linux - -package osarch - -func ArchitectureGetLocal() (string, error) { - return ArchitectureDefault, nil -} diff --git a/vendor/github.com/lxc/lxd/shared/osarch/release.go b/vendor/github.com/lxc/lxd/shared/osarch/release.go deleted file mode 100644 index 3afd8470..00000000 --- a/vendor/github.com/lxc/lxd/shared/osarch/release.go +++ /dev/null @@ -1,49 +0,0 @@ -package osarch - -import ( - "fmt" - "os" - "strings" -) - -// GetLSBRelease returns a map with Linux distribution information. 
-func GetLSBRelease() (map[string]string, error) { - osRelease, err := getLSBRelease("/etc/os-release") - if os.IsNotExist(err) { - return getLSBRelease("/usr/lib/os-release") - } - - return osRelease, err -} - -func getLSBRelease(filename string) (map[string]string, error) { - osRelease := make(map[string]string) - - data, err := os.ReadFile(filename) - if err != nil { - if os.IsNotExist(err) { - return osRelease, nil - } - - return osRelease, err - } - - for i, line := range strings.Split(string(data), "\n") { - if len(line) == 0 { - continue - } - - if strings.HasPrefix(line, "#") { - continue - } - - tokens := strings.SplitN(line, "=", 2) - if len(tokens) != 2 { - return osRelease, fmt.Errorf("%s: invalid format on line %d", filename, i+1) - } - - osRelease[tokens[0]] = strings.Trim(tokens[1], `'"`) - } - - return osRelease, nil -} diff --git a/vendor/github.com/lxc/lxd/shared/proxy.go b/vendor/github.com/lxc/lxd/shared/proxy.go deleted file mode 100644 index a43b18f4..00000000 --- a/vendor/github.com/lxc/lxd/shared/proxy.go +++ /dev/null @@ -1,184 +0,0 @@ -package shared - -import ( - "fmt" - "net" - "net/http" - "net/url" - "os" - "strings" - "sync" -) - -var ( - httpProxyEnv = &envOnce{ - names: []string{"HTTP_PROXY", "http_proxy"}, - } - - httpsProxyEnv = &envOnce{ - names: []string{"HTTPS_PROXY", "https_proxy"}, - } - - noProxyEnv = &envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -type envOnce struct { - names []string - once sync.Once - val string -} - -func (e *envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// This is basically the same as golang's ProxyFromEnvironment, except it -// doesn't fall back to http_proxy when https_proxy isn't around, which is -// incorrect behavior. It still respects HTTP_PROXY, HTTPS_PROXY, and NO_PROXY. 
-func ProxyFromEnvironment(req *http.Request) (*url.URL, error) { - return ProxyFromConfig("", "", "")(req) -} - -func ProxyFromConfig(httpsProxy string, httpProxy string, noProxy string) func(req *http.Request) (*url.URL, error) { - return func(req *http.Request) (*url.URL, error) { - var proxy, port string - var err error - - switch req.URL.Scheme { - case "https": - proxy = httpsProxy - if proxy == "" { - proxy = httpsProxyEnv.Get() - } - - port = ":443" - case "http": - proxy = httpProxy - if proxy == "" { - proxy = httpProxyEnv.Get() - } - - port = ":80" - default: - return nil, fmt.Errorf("unknown scheme %s", req.URL.Scheme) - } - - if proxy == "" { - return nil, nil - } - - addr := req.URL.Host - if !hasPort(addr) { - addr = addr + port - } - - use, err := useProxy(addr, noProxy) - if err != nil { - return nil, err - } - - if !use { - return nil, nil - } - - proxyURL, err := url.Parse(proxy) - if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") { - // proxy was bogus. Try prepending "http://" to it and - // see if that parses correctly. If not, we fall - // through and complain about the original one. 
- proxyURL, err := url.Parse("http://" + proxy) - if err == nil { - return proxyURL, nil - } - } - if err != nil { - return nil, fmt.Errorf("invalid proxy address %q: %w", proxy, err) - } - - return proxyURL, nil - } -} - -func hasPort(s string) bool { - return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") -} - -func useProxy(addr string, noProxy string) (bool, error) { - if noProxy == "" { - noProxy = noProxyEnv.Get() - } - - if len(addr) == 0 { - return true, nil - } - - host, _, err := net.SplitHostPort(addr) - if err != nil { - return false, nil - } - - if host == "localhost" { - return false, nil - } - - ip := net.ParseIP(host) - if ip != nil { - if ip.IsLoopback() { - return false, nil - } - } - - if noProxy == "*" { - return false, nil - } - - addr = strings.ToLower(strings.TrimSpace(addr)) - if hasPort(addr) { - addr = addr[:strings.LastIndex(addr, ":")] - } - - for _, p := range strings.Split(noProxy, ",") { - p = strings.ToLower(strings.TrimSpace(p)) - if len(p) == 0 { - continue - } - - if hasPort(p) { - p = p[:strings.LastIndex(p, ":")] - } - - if addr == p { - return false, nil - } - - _, pnet, err := net.ParseCIDR(p) - if err == nil && ip != nil { - // IPv4/CIDR, IPv6/CIDR - if pnet.Contains(ip) { - return false, nil - } - } - if p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:]) { - // noProxy ".foo.com" matches "bar.foo.com" or "foo.com" - return false, nil - } - - if p[0] != '.' && strings.HasSuffix(addr, p) && addr[len(addr)-len(p)-1] == '.' { - // noProxy "foo.com" matches "bar.foo.com" - return false, nil - } - } - return true, nil -} diff --git a/vendor/github.com/lxc/lxd/shared/simplestreams/index.go b/vendor/github.com/lxc/lxd/shared/simplestreams/index.go deleted file mode 100644 index 6ea63259..00000000 --- a/vendor/github.com/lxc/lxd/shared/simplestreams/index.go +++ /dev/null @@ -1,17 +0,0 @@ -package simplestreams - -// Stream represents the base structure of index.json. 
-type Stream struct { - Index map[string]StreamIndex `json:"index"` - Updated string `json:"updated,omitempty"` - Format string `json:"format"` -} - -// StreamIndex represents the Index entry inside index.json. -type StreamIndex struct { - DataType string `json:"datatype"` - Path string `json:"path"` - Updated string `json:"updated,omitempty"` - Products []string `json:"products"` - Format string `json:"format,omitempty"` -} diff --git a/vendor/github.com/lxc/lxd/shared/simplestreams/products.go b/vendor/github.com/lxc/lxd/shared/simplestreams/products.go deleted file mode 100644 index de71debf..00000000 --- a/vendor/github.com/lxc/lxd/shared/simplestreams/products.go +++ /dev/null @@ -1,290 +0,0 @@ -package simplestreams - -import ( - "fmt" - "strings" - "time" - - "github.com/lxc/lxd/shared" - "github.com/lxc/lxd/shared/api" - "github.com/lxc/lxd/shared/osarch" -) - -// Products represents the base of download.json. -type Products struct { - ContentID string `json:"content_id"` - DataType string `json:"datatype"` - Format string `json:"format"` - License string `json:"license,omitempty"` - Products map[string]Product `json:"products"` - Updated string `json:"updated,omitempty"` -} - -// Product represents a single product inside download.json. -type Product struct { - Aliases string `json:"aliases"` - Architecture string `json:"arch"` - OperatingSystem string `json:"os"` - LXDRequirements map[string]string `json:"lxd_requirements,omitempty"` - Release string `json:"release"` - ReleaseCodename string `json:"release_codename,omitempty"` - ReleaseTitle string `json:"release_title"` - Supported bool `json:"supported,omitempty"` - SupportedEOL string `json:"support_eol,omitempty"` - Version string `json:"version,omitempty"` - Versions map[string]ProductVersion `json:"versions"` - - // Non-standard fields (only used on some image servers). - Variant string `json:"variant,omitempty"` -} - -// ProductVersion represents a particular version of a product. 
-type ProductVersion struct { - Items map[string]ProductVersionItem `json:"items"` - Label string `json:"label,omitempty"` - PublicName string `json:"pubname,omitempty"` -} - -// ProductVersionItem represents a file/item of a particular ProductVersion. -type ProductVersionItem struct { - LXDHashSha256DiskImg string `json:"combined_disk1-img_sha256,omitempty"` - LXDHashSha256DiskKvmImg string `json:"combined_disk-kvm-img_sha256,omitempty"` - LXDHashSha256DiskUefiImg string `json:"combined_uefi1-img_sha256,omitempty"` - LXDHashSha256RootXz string `json:"combined_rootxz_sha256,omitempty"` - LXDHashSha256 string `json:"combined_sha256,omitempty"` - LXDHashSha256SquashFs string `json:"combined_squashfs_sha256,omitempty"` - FileType string `json:"ftype"` - HashMd5 string `json:"md5,omitempty"` - Path string `json:"path"` - HashSha256 string `json:"sha256,omitempty"` - Size int64 `json:"size"` - DeltaBase string `json:"delta_base,omitempty"` -} - -// ToLXD converts the products data into a list of LXD images and associated downloadable files. 
-func (s *Products) ToLXD() ([]api.Image, map[string][][]string) { - downloads := map[string][][]string{} - - images := []api.Image{} - nameLayout := "20060102" - eolLayout := "2006-01-02" - - for _, product := range s.Products { - // Skip unsupported architectures - architecture, err := osarch.ArchitectureId(product.Architecture) - if err != nil { - continue - } - - architectureName, err := osarch.ArchitectureName(architecture) - if err != nil { - continue - } - - for name, version := range product.Versions { - // Short of anything better, use the name as date (see format above) - if len(name) < 8 { - continue - } - - creationDate, err := time.Parse(nameLayout, name[0:8]) - if err != nil { - continue - } - - // Image processing function - addImage := func(meta *ProductVersionItem, root *ProductVersionItem) error { - // Look for deltas (only on squashfs) - deltas := []ProductVersionItem{} - if root != nil && root.FileType == "squashfs" { - for _, item := range version.Items { - if item.FileType == "squashfs.vcdiff" { - deltas = append(deltas, item) - } - } - } - - // Figure out the fingerprint - fingerprint := "" - if root != nil { - if root.FileType == "root.tar.xz" { - if meta.LXDHashSha256RootXz != "" { - fingerprint = meta.LXDHashSha256RootXz - } else { - fingerprint = meta.LXDHashSha256 - } - } else if root.FileType == "squashfs" { - fingerprint = meta.LXDHashSha256SquashFs - } else if root.FileType == "disk-kvm.img" { - fingerprint = meta.LXDHashSha256DiskKvmImg - } else if root.FileType == "disk1.img" { - fingerprint = meta.LXDHashSha256DiskImg - } else if root.FileType == "uefi1.img" { - fingerprint = meta.LXDHashSha256DiskUefiImg - } - } else { - fingerprint = meta.HashSha256 - } - - if fingerprint == "" { - return fmt.Errorf("No LXD image fingerprint found") - } - - // Figure out the size - size := meta.Size - if root != nil { - size += root.Size - } - - // Determine filename - if meta.Path == "" { - return fmt.Errorf("Missing path field on metadata 
entry") - } - - fields := strings.Split(meta.Path, "/") - filename := fields[len(fields)-1] - - // Generate the actual image entry - description := fmt.Sprintf("%s %s %s", product.OperatingSystem, product.ReleaseTitle, product.Architecture) - if version.Label != "" { - description = fmt.Sprintf("%s (%s)", description, version.Label) - } - - description = fmt.Sprintf("%s (%s)", description, name) - - image := api.Image{} - image.Architecture = architectureName - image.Public = true - image.Size = size - image.CreatedAt = creationDate - image.UploadedAt = creationDate - image.Filename = filename - image.Fingerprint = fingerprint - image.Properties = map[string]string{ - "os": product.OperatingSystem, - "release": product.Release, - "version": product.Version, - "architecture": product.Architecture, - "label": version.Label, - "serial": name, - "description": description, - } - - for lxdReq, lxdReqVal := range product.LXDRequirements { - image.Properties["requirements."+lxdReq] = lxdReqVal - } - - if product.Variant != "" { - image.Properties["variant"] = product.Variant - } - - image.Type = "container" - - if root != nil { - image.Properties["type"] = root.FileType - if root.FileType == "disk1.img" || root.FileType == "disk-kvm.img" || root.FileType == "uefi1.img" { - image.Type = "virtual-machine" - } - } else { - image.Properties["type"] = "tar.gz" - } - - // Clear unset properties - for k, v := range image.Properties { - if v == "" { - delete(image.Properties, k) - } - } - - // Add the provided aliases - if product.Aliases != "" { - image.Aliases = []api.ImageAlias{} - for _, entry := range strings.Split(product.Aliases, ",") { - image.Aliases = append(image.Aliases, api.ImageAlias{Name: entry}) - } - } - - // Attempt to parse the EOL - image.ExpiresAt = time.Unix(0, 0).UTC() - if product.SupportedEOL != "" { - eolDate, err := time.Parse(eolLayout, product.SupportedEOL) - if err == nil { - image.ExpiresAt = eolDate - } - } - - // Set the file list - var 
imgDownloads [][]string - if root == nil { - imgDownloads = [][]string{{meta.Path, meta.HashSha256, "meta", fmt.Sprintf("%d", meta.Size)}} - } else { - imgDownloads = [][]string{ - {meta.Path, meta.HashSha256, "meta", fmt.Sprintf("%d", meta.Size)}, - {root.Path, root.HashSha256, "root", fmt.Sprintf("%d", root.Size)}} - } - - // Add the deltas - for _, delta := range deltas { - srcImage, ok := product.Versions[delta.DeltaBase] - if !ok { - // Delta for a since expired image - continue - } - - // Locate source image fingerprint - var srcFingerprint string - for _, item := range srcImage.Items { - if item.FileType != "lxd.tar.xz" { - continue - } - - srcFingerprint = item.LXDHashSha256SquashFs - break - } - - if srcFingerprint == "" { - // Couldn't find the image - continue - } - - // Add the delta - imgDownloads = append(imgDownloads, []string{ - delta.Path, - delta.HashSha256, - fmt.Sprintf("root.delta-%s", srcFingerprint), - fmt.Sprintf("%d", delta.Size)}) - } - - // Add the image - downloads[fingerprint] = imgDownloads - images = append(images, image) - - return nil - } - - // Locate a valid LXD image - for _, item := range version.Items { - if item.FileType == "lxd_combined.tar.gz" { - err := addImage(&item, nil) - if err != nil { - continue - } - } - - if item.FileType == "lxd.tar.xz" { - // Locate the root files - for _, subItem := range version.Items { - if shared.StringInSlice(subItem.FileType, []string{"disk1.img", "disk-kvm.img", "uefi1.img", "root.tar.xz", "squashfs"}) { - err := addImage(&item, &subItem) - if err != nil { - continue - } - } - } - } - } - } - } - - return images, downloads -} diff --git a/vendor/github.com/lxc/lxd/shared/simplestreams/simplestreams.go b/vendor/github.com/lxc/lxd/shared/simplestreams/simplestreams.go deleted file mode 100644 index 87889aeb..00000000 --- a/vendor/github.com/lxc/lxd/shared/simplestreams/simplestreams.go +++ /dev/null @@ -1,506 +0,0 @@ -package simplestreams - -import ( - "encoding/json" - "fmt" - "io" - 
"net/http" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - "github.com/lxc/lxd/shared" - "github.com/lxc/lxd/shared/api" - "github.com/lxc/lxd/shared/osarch" -) - -var urlDefaultOS = map[string]string{ - "https://cloud-images.ubuntu.com": "ubuntu", -} - -// DownloadableFile represents a file with its URL, hash and size. -type DownloadableFile struct { - Path string - Sha256 string - Size int64 -} - -// NewClient returns a simplestreams client for the provided stream URL. -func NewClient(url string, httpClient http.Client, useragent string) *SimpleStreams { - return &SimpleStreams{ - http: &httpClient, - url: url, - cachedProducts: map[string]*Products{}, - useragent: useragent, - } -} - -// SimpleStreams represents a simplestream client. -type SimpleStreams struct { - http *http.Client - url string - useragent string - - cachedStream *Stream - cachedProducts map[string]*Products - cachedImages []api.Image - cachedAliases []extendedAlias - - cachePath string - cacheExpiry time.Duration -} - -// SetCache configures the on-disk cache. 
-func (s *SimpleStreams) SetCache(path string, expiry time.Duration) { - s.cachePath = path - s.cacheExpiry = expiry -} - -func (s *SimpleStreams) readCache(path string) ([]byte, bool) { - cacheName := filepath.Join(s.cachePath, path) - - if s.cachePath == "" { - return nil, false - } - - if !shared.PathExists(cacheName) { - return nil, false - } - - fi, err := os.Stat(cacheName) - if err != nil { - _ = os.Remove(cacheName) - return nil, false - } - - body, err := os.ReadFile(cacheName) - if err != nil { - _ = os.Remove(cacheName) - return nil, false - } - - expired := time.Since(fi.ModTime()) > s.cacheExpiry - - return body, expired -} - -func (s *SimpleStreams) cachedDownload(path string) ([]byte, error) { - fields := strings.Split(path, "/") - fileName := fields[len(fields)-1] - - // Attempt to get from the cache - cachedBody, expired := s.readCache(fileName) - if cachedBody != nil && !expired { - return cachedBody, nil - } - - // Download from the source - uri, err := shared.JoinUrls(s.url, path) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", uri, nil) - if err != nil { - return nil, err - } - - if s.useragent != "" { - req.Header.Set("User-Agent", s.useragent) - } - - r, err := s.http.Do(req) - if err != nil { - // On local connectivity error, return from cache anyway - if cachedBody != nil { - return cachedBody, nil - } - - return nil, err - } - - defer func() { _ = r.Body.Close() }() - - if r.StatusCode != http.StatusOK { - // On local connectivity error, return from cache anyway - if cachedBody != nil { - return cachedBody, nil - } - - return nil, fmt.Errorf("Unable to fetch %s: %s", uri, r.Status) - } - - body, err := io.ReadAll(r.Body) - if err != nil { - return nil, err - } - - if len(body) == 0 { - return nil, fmt.Errorf("No content in download from %q", uri) - } - - // Attempt to store in cache - if s.cachePath != "" { - cacheName := filepath.Join(s.cachePath, fileName) - _ = os.Remove(cacheName) - _ = 
os.WriteFile(cacheName, body, 0644) - } - - return body, nil -} - -func (s *SimpleStreams) parseStream() (*Stream, error) { - if s.cachedStream != nil { - return s.cachedStream, nil - } - - path := "streams/v1/index.json" - body, err := s.cachedDownload(path) - if err != nil { - return nil, err - } - - pathURL, _ := shared.JoinUrls(s.url, path) - - // Parse the idnex - stream := Stream{} - err = json.Unmarshal(body, &stream) - if err != nil { - return nil, fmt.Errorf("Failed decoding stream JSON from %q: %w (%q)", pathURL, err, string(body)) - } - - s.cachedStream = &stream - - return &stream, nil -} - -func (s *SimpleStreams) parseProducts(path string) (*Products, error) { - if s.cachedProducts[path] != nil { - return s.cachedProducts[path], nil - } - - body, err := s.cachedDownload(path) - if err != nil { - return nil, err - } - - // Parse the idnex - products := Products{} - err = json.Unmarshal(body, &products) - if err != nil { - return nil, fmt.Errorf("Failed decoding products JSON from %q: %w", path, err) - } - - s.cachedProducts[path] = &products - - return &products, nil -} - -type extendedAlias struct { - Name string - Alias *api.ImageAliasesEntry - Type string - Architecture string -} - -func (s *SimpleStreams) applyAliases(images []api.Image) ([]api.Image, []extendedAlias, error) { - aliasesList := []extendedAlias{} - - // Sort the images so we tag the preferred ones - sort.Sort(sortedImages(images)) - - // Look for the default OS - defaultOS := "" - for k, v := range urlDefaultOS { - if strings.HasPrefix(s.url, k) { - defaultOS = v - break - } - } - - addAlias := func(imageType string, architecture string, name string, fingerprint string) *api.ImageAlias { - if defaultOS != "" { - name = strings.TrimPrefix(name, fmt.Sprintf("%s/", defaultOS)) - } - - for _, entry := range aliasesList { - if entry.Name == name && entry.Type == imageType && entry.Architecture == architecture { - return nil - } - } - - alias := api.ImageAliasesEntry{} - alias.Name = name 
- alias.Target = fingerprint - alias.Type = imageType - - entry := extendedAlias{ - Name: name, - Type: imageType, - Alias: &alias, - Architecture: architecture, - } - - aliasesList = append(aliasesList, entry) - - return &api.ImageAlias{Name: name} - } - - architectureName, _ := osarch.ArchitectureGetLocal() - - newImages := []api.Image{} - for _, image := range images { - if image.Aliases != nil { - // Build a new list of aliases from the provided ones - aliases := image.Aliases - image.Aliases = nil - - for _, entry := range aliases { - // Short - alias := addAlias(image.Type, image.Architecture, entry.Name, image.Fingerprint) - if alias != nil && architectureName == image.Architecture { - image.Aliases = append(image.Aliases, *alias) - } - - // Medium - alias = addAlias(image.Type, image.Architecture, fmt.Sprintf("%s/%s", entry.Name, image.Properties["architecture"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } - } - } - - newImages = append(newImages, image) - } - - return newImages, aliasesList, nil -} - -func (s *SimpleStreams) getImages() ([]api.Image, []extendedAlias, error) { - if s.cachedImages != nil && s.cachedAliases != nil { - return s.cachedImages, s.cachedAliases, nil - } - - images := []api.Image{} - - // Load the stream data - stream, err := s.parseStream() - if err != nil { - return nil, nil, fmt.Errorf("Failed parsing stream: %w", err) - } - - // Iterate through the various indices - for _, entry := range stream.Index { - // We only care about images - if entry.DataType != "image-downloads" { - continue - } - - // No point downloading an empty image list - if len(entry.Products) == 0 { - continue - } - - products, err := s.parseProducts(entry.Path) - if err != nil { - return nil, nil, fmt.Errorf("Failed parsing products: %w", err) - } - - streamImages, _ := products.ToLXD() - images = append(images, streamImages...) 
- } - - // Setup the aliases - images, aliases, err := s.applyAliases(images) - if err != nil { - return nil, nil, fmt.Errorf("Failed applying aliases: %w", err) - } - - s.cachedImages = images - s.cachedAliases = aliases - - return images, aliases, nil -} - -// GetFiles returns a map of files for the provided image fingerprint. -func (s *SimpleStreams) GetFiles(fingerprint string) (map[string]DownloadableFile, error) { - // Load the main stream - stream, err := s.parseStream() - if err != nil { - return nil, err - } - - // Iterate through the various indices - for _, entry := range stream.Index { - // We only care about images - if entry.DataType != "image-downloads" { - continue - } - - // No point downloading an empty image list - if len(entry.Products) == 0 { - continue - } - - products, err := s.parseProducts(entry.Path) - if err != nil { - return nil, err - } - - images, downloads := products.ToLXD() - - for _, image := range images { - if strings.HasPrefix(image.Fingerprint, fingerprint) { - files := map[string]DownloadableFile{} - - for _, path := range downloads[image.Fingerprint] { - size, err := strconv.ParseInt(path[3], 10, 64) - if err != nil { - return nil, err - } - - files[path[2]] = DownloadableFile{ - Path: path[0], - Sha256: path[1], - Size: size} - } - - return files, nil - } - } - } - - return nil, fmt.Errorf("Couldn't find the requested image") -} - -// ListAliases returns a list of image aliases for the provided image fingerprint. 
-func (s *SimpleStreams) ListAliases() ([]api.ImageAliasesEntry, error) { - _, aliasesList, err := s.getImages() - if err != nil { - return nil, err - } - - // Sort the list ahead of dedup - sort.Sort(sortedAliases(aliasesList)) - - aliases := []api.ImageAliasesEntry{} - for _, entry := range aliasesList { - dup := false - for _, v := range aliases { - if v.Name == entry.Name && v.Type == entry.Type { - dup = true - } - } - - if dup { - continue - } - - aliases = append(aliases, *entry.Alias) - } - - return aliases, nil -} - -// ListImages returns a list of LXD images. -func (s *SimpleStreams) ListImages() ([]api.Image, error) { - images, _, err := s.getImages() - return images, err -} - -// GetAlias returns a LXD ImageAliasesEntry for the provided alias name. -func (s *SimpleStreams) GetAlias(imageType string, name string) (*api.ImageAliasesEntry, error) { - _, aliasesList, err := s.getImages() - if err != nil { - return nil, err - } - - // Sort the list ahead of dedup - sort.Sort(sortedAliases(aliasesList)) - - var match *api.ImageAliasesEntry - for _, entry := range aliasesList { - if entry.Name != name { - continue - } - - if entry.Type != imageType && imageType != "" { - continue - } - - if match != nil { - if match.Type != entry.Type { - return nil, fmt.Errorf("More than one match for alias '%s'", name) - } - - continue - } - - match = entry.Alias - } - - if match == nil { - return nil, fmt.Errorf("Alias '%s' doesn't exist", name) - } - - return match, nil -} - -// GetAliasArchitectures returns a map of architecture / alias entries for an alias. 
-func (s *SimpleStreams) GetAliasArchitectures(imageType string, name string) (map[string]*api.ImageAliasesEntry, error) { - aliases := map[string]*api.ImageAliasesEntry{} - - _, aliasesList, err := s.getImages() - if err != nil { - return nil, err - } - - for _, entry := range aliasesList { - if entry.Name != name { - continue - } - - if entry.Type != imageType && imageType != "" { - continue - } - - if aliases[entry.Architecture] != nil { - return nil, fmt.Errorf("More than one match for alias '%s'", name) - } - - aliases[entry.Architecture] = entry.Alias - } - - if len(aliases) == 0 { - return nil, fmt.Errorf("Alias '%s' doesn't exist", name) - } - - return aliases, nil -} - -// GetImage returns a LXD image for the provided image fingerprint. -func (s *SimpleStreams) GetImage(fingerprint string) (*api.Image, error) { - images, _, err := s.getImages() - if err != nil { - return nil, err - } - - matches := []api.Image{} - - for _, image := range images { - if strings.HasPrefix(image.Fingerprint, fingerprint) { - matches = append(matches, image) - } - } - - if len(matches) == 0 { - return nil, fmt.Errorf("The requested image couldn't be found") - } else if len(matches) > 1 { - return nil, fmt.Errorf("More than one match for the provided partial fingerprint") - } - - return &matches[0], nil -} diff --git a/vendor/github.com/lxc/lxd/shared/simplestreams/sort.go b/vendor/github.com/lxc/lxd/shared/simplestreams/sort.go deleted file mode 100644 index d22b13af..00000000 --- a/vendor/github.com/lxc/lxd/shared/simplestreams/sort.go +++ /dev/null @@ -1,125 +0,0 @@ -package simplestreams - -import ( - "github.com/lxc/lxd/shared/api" - "github.com/lxc/lxd/shared/osarch" -) - -var nativeName, _ = osarch.ArchitectureGetLocal() - -type sortedImages []api.Image - -func (a sortedImages) Len() int { - return len(a) -} - -func (a sortedImages) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -func (a sortedImages) Less(i, j int) bool { - // When sorting images, group by: - // - 
Operating system (os) - // - Release (release) - // - Variant (variant) - // - Serial number / date (serial) - // - Architecture (architecture) - for _, prop := range []string{"os", "release", "variant", "serial", "architecture"} { - if a[i].Properties[prop] == a[j].Properties[prop] { - continue - } - - if a[i].Properties[prop] == "" { - return false - } - - if a[i].Properties[prop] == "" { - return true - } - - if prop == "serial" { - return a[i].Properties[prop] > a[j].Properties[prop] - } - - return a[i].Properties[prop] < a[j].Properties[prop] - } - - if a[i].Properties["type"] != a[j].Properties["type"] { - iScore := 0 - jScore := 0 - - // Image types in order of preference for LXD hosts. - for score, pref := range []string{"squashfs", "root.tar.xz", "disk-kvm.img", "uefi1.img", "disk1.img"} { - if a[i].Properties["type"] == pref { - iScore = score - } - - if a[j].Properties["type"] == pref { - jScore = score - } - } - - return iScore < jScore - } - - return false -} - -type sortedAliases []extendedAlias - -func (a sortedAliases) Len() int { - return len(a) -} - -func (a sortedAliases) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -func (a sortedAliases) Less(i, j int) bool { - // Check functions. - isNative := func(arch string) bool { - return nativeName == arch - } - - isPersonality := func(arch string) bool { - archID, err := osarch.ArchitectureId(nativeName) - if err != nil { - return false - } - - personalities, err := osarch.ArchitecturePersonalities(archID) - if err != nil { - return false - } - - for _, personality := range personalities { - personalityName, err := osarch.ArchitectureName(personality) - if err != nil { - return false - } - - if personalityName == arch { - return true - } - } - - return false - } - - // Same thing. - if a[i].Architecture == a[j].Architecture { - return false - } - - // Look for native. - if isNative(a[i].Architecture) { - return true - } - - // Look for personality. 
- if isPersonality(a[i].Architecture) && !isNative(a[j].Architecture) { - return true - } - - return false -} diff --git a/vendor/github.com/lxc/lxd/shared/tcp/tcp_timeout_user.go b/vendor/github.com/lxc/lxd/shared/tcp/tcp_timeout_user.go deleted file mode 100644 index e322f1b3..00000000 --- a/vendor/github.com/lxc/lxd/shared/tcp/tcp_timeout_user.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build linux || zos - -package tcp - -import ( - "fmt" - "net" - "time" - - "golang.org/x/sys/unix" -) - -// SetUserTimeout sets the TCP user timeout on a connection's socket. -func SetUserTimeout(conn *net.TCPConn, timeout time.Duration) error { - rawConn, err := conn.SyscallConn() - if err != nil { - return fmt.Errorf("Error getting raw connection: %w", err) - } - - err = rawConn.Control(func(fd uintptr) { - err = unix.SetsockoptInt(int(fd), unix.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) - }) - if err != nil { - return fmt.Errorf("Error setting TCP_USER_TIMEOUT option on socket: %w", err) - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/shared/tcp/tcp_timeout_user_noop.go b/vendor/github.com/lxc/lxd/shared/tcp/tcp_timeout_user_noop.go deleted file mode 100644 index c1ead151..00000000 --- a/vendor/github.com/lxc/lxd/shared/tcp/tcp_timeout_user_noop.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !linux && !zos - -package tcp - -import ( - "net" - "time" -) - -// SetUserTimeout sets the TCP user timeout on a connection's socket. -// Only supported on Linux and ZOS. 
-func SetUserTimeout(conn *net.TCPConn, timeout time.Duration) error { - return nil -} diff --git a/vendor/github.com/lxc/lxd/shared/tcp/tcp_timeouts.go b/vendor/github.com/lxc/lxd/shared/tcp/tcp_timeouts.go deleted file mode 100644 index d2122b52..00000000 --- a/vendor/github.com/lxc/lxd/shared/tcp/tcp_timeouts.go +++ /dev/null @@ -1,70 +0,0 @@ -package tcp - -import ( - "crypto/tls" - "fmt" - "net" - "reflect" - "time" - "unsafe" -) - -// ExtractConn tries to extract the underlying net.TCPConn from a tls.Conn or net.Conn. -func ExtractConn(conn net.Conn) (*net.TCPConn, error) { - var tcpConn *net.TCPConn - - // Go doesn't currently expose the underlying TCP connection of a TLS connection, but we need it in order - // to set timeout properties on the connection. We use some reflect/unsafe magic to extract the private - // remote.conn field, which is indeed the underlying TCP connection. - tlsConn, ok := conn.(*tls.Conn) - if ok { - field := reflect.ValueOf(tlsConn).Elem().FieldByName("conn") - field = reflect.NewAt(field.Type(), unsafe.Pointer(field.UnsafeAddr())).Elem() - c := field.Interface() - - tcpConn, ok = c.(*net.TCPConn) - if !ok { - return nil, fmt.Errorf("Underlying tls.Conn connection is not a net.TCPConn") - } - } else { - tcpConn, ok = conn.(*net.TCPConn) - if !ok { - return nil, fmt.Errorf("Connection is not a net.TCPConn") - } - } - - return tcpConn, nil -} - -// SetTimeouts sets TCP_USER_TIMEOUT and TCP keep alive timeouts on a connection. -// If userTimeout is zero, then defaults to 2 minutes. -func SetTimeouts(conn *net.TCPConn, userTimeout time.Duration) error { - if userTimeout == 0 { - userTimeout = time.Minute * 2 - } - - // Set TCP_USER_TIMEOUT option to limit the maximum amount of time in ms that transmitted data may remain - // unacknowledged before TCP will forcefully close the corresponding connection and return ETIMEDOUT to the - // application. 
This combined with the TCP keepalive options on the socket will ensure that should the - // remote side of the connection disappear abruptly that LXD will detect this and close the socket quickly. - // Decreasing the user timeouts allows applications to "fail fast" if so desired. Otherwise it may take - // up to 20 minutes with the current system defaults in a normal WAN environment if there are packets in - // the send queue that will prevent the keepalive timer from working as the retransmission timers kick in. - // See https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=dca43c75e7e545694a9dd6288553f55c53e2a3a3 - err := SetUserTimeout(conn, userTimeout) - if err != nil { - return err - } - - err = conn.SetKeepAlive(true) - if err != nil { - return err - } - - err = conn.SetKeepAlivePeriod(3 * time.Second) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/shared/termios/termios.go b/vendor/github.com/lxc/lxd/shared/termios/termios.go deleted file mode 100644 index 884378cb..00000000 --- a/vendor/github.com/lxc/lxd/shared/termios/termios.go +++ /dev/null @@ -1 +0,0 @@ -package termios diff --git a/vendor/github.com/lxc/lxd/shared/termios/termios_linux.go b/vendor/github.com/lxc/lxd/shared/termios/termios_linux.go deleted file mode 100644 index dfc96838..00000000 --- a/vendor/github.com/lxc/lxd/shared/termios/termios_linux.go +++ /dev/null @@ -1,80 +0,0 @@ -//go:build linux - -package termios - -import ( - "golang.org/x/sys/unix" -) - -const ioctlReadTermios = unix.TCGETS -const ioctlWriteTermios = unix.TCSETS - -// State contains the state of a terminal. -type State struct { - Termios unix.Termios -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - _, err := GetState(fd) - return err == nil -} - -// GetState returns the current state of a terminal which may be useful to restore the terminal after a signal. 
-func GetState(fd int) (*State, error) { - termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - if err != nil { - return nil, err - } - - state := State{} - state.Termios = *termios - - return &state, nil -} - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (int, int, error) { - winsize, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) - if err != nil { - return -1, -1, err - } - - return int(winsize.Col), int(winsize.Row), nil -} - -// MakeRaw put the terminal connected to the given file descriptor into raw mode and returns the previous state of the terminal so that it can be restored. -func MakeRaw(fd int) (*State, error) { - oldState, err := GetState(fd) - if err != nil { - return nil, err - } - - newState := *oldState - - // This attempts to replicate the behaviour documented for cfmakeraw in the termios(3) manpage. - newState.Termios.Iflag &^= unix.BRKINT | unix.ICRNL | unix.INPCK | unix.ISTRIP | unix.IXON - newState.Termios.Oflag &^= unix.OPOST - newState.Termios.Cflag &^= unix.CSIZE | unix.PARENB - newState.Termios.Cflag |= unix.CS8 - newState.Termios.Lflag &^= unix.ECHO | unix.ICANON | unix.IEXTEN | unix.ISIG - newState.Termios.Cc[unix.VMIN] = 1 - newState.Termios.Cc[unix.VTIME] = 0 - - err = Restore(fd, &newState) - if err != nil { - return nil, err - } - - return oldState, nil -} - -// Restore restores the terminal connected to the given file descriptor to a previous state. 
-func Restore(fd int, state *State) error { - err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.Termios) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/lxc/lxd/shared/termios/termios_other.go b/vendor/github.com/lxc/lxd/shared/termios/termios_other.go deleted file mode 100644 index 9fbf5a6f..00000000 --- a/vendor/github.com/lxc/lxd/shared/termios/termios_other.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build !linux - -package termios - -import ( - "golang.org/x/crypto/ssh/terminal" -) - -// State contains the state of a terminal. -type State terminal.State - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - return terminal.IsTerminal(fd) -} - -// GetState returns the current state of a terminal which may be useful to restore the terminal after a signal. -func GetState(fd int) (*State, error) { - state, err := terminal.GetState(fd) - if err != nil { - return nil, err - } - - currentState := State(*state) - return ¤tState, nil -} - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (int, int, error) { - return terminal.GetSize(fd) -} - -// MakeRaw put the terminal connected to the given file descriptor into raw mode and returns the previous state of the terminal so that it can be restored. -func MakeRaw(fd int) (*State, error) { - state, err := terminal.MakeRaw(fd) - if err != nil { - return nil, err - } - - oldState := State(*state) - return &oldState, nil -} - -// Restore restores the terminal connected to the given file descriptor to a previous state. 
-func Restore(fd int, state *State) error { - newState := terminal.State(*state) - - return terminal.Restore(fd, &newState) -} diff --git a/vendor/github.com/lxc/lxd/shared/units/units.go b/vendor/github.com/lxc/lxd/shared/units/units.go deleted file mode 100644 index afbce528..00000000 --- a/vendor/github.com/lxc/lxd/shared/units/units.go +++ /dev/null @@ -1,194 +0,0 @@ -package units - -import ( - "fmt" - "strconv" -) - -func handleOverflow(val int64, mult int64) (int64, error) { - result := val * mult - if val == 0 || mult == 0 || val == 1 || mult == 1 { - return result, nil - } - - if val != 0 && (result/val) != mult { - return -1, fmt.Errorf("Overflow multiplying %d with %d", val, mult) - } - - return result, nil -} - -// ParseByteSizeString parses a human representation of an amount of -// data into a number of bytes. -func ParseByteSizeString(input string) (int64, error) { - // Empty input - if input == "" { - return 0, nil - } - - // Find where the suffix begins - suffixLen := 0 - for i, chr := range []byte(input) { - _, err := strconv.Atoi(string([]byte{chr})) - if err != nil { - suffixLen = len(input) - i - break - } - } - - if suffixLen == len(input) { - return -1, fmt.Errorf("Invalid value: %s", input) - } - - // Extract the suffix - suffix := input[len(input)-suffixLen:] - - // Extract the value - value := input[0 : len(input)-suffixLen] - valueInt, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return -1, fmt.Errorf("Invalid integer: %s", input) - } - - // Figure out the multiplicator - multiplicator := int64(0) - switch suffix { - case "", "B", " bytes": - multiplicator = 1 - case "kB": - multiplicator = 1000 - case "MB": - multiplicator = 1000 * 1000 - case "GB": - multiplicator = 1000 * 1000 * 1000 - case "TB": - multiplicator = 1000 * 1000 * 1000 * 1000 - case "PB": - multiplicator = 1000 * 1000 * 1000 * 1000 * 1000 - case "EB": - multiplicator = 1000 * 1000 * 1000 * 1000 * 1000 * 1000 - case "KiB": - multiplicator = 1024 - case "MiB": 
- multiplicator = 1024 * 1024 - case "GiB": - multiplicator = 1024 * 1024 * 1024 - case "TiB": - multiplicator = 1024 * 1024 * 1024 * 1024 - case "PiB": - multiplicator = 1024 * 1024 * 1024 * 1024 * 1024 - case "EiB": - multiplicator = 1024 * 1024 * 1024 * 1024 * 1024 * 1024 - default: - return -1, fmt.Errorf("Invalid value: %s", input) - } - - return handleOverflow(valueInt, multiplicator) -} - -// ParseBitSizeString parses a human representation of an amount of -// data into a number of bits. -func ParseBitSizeString(input string) (int64, error) { - // Empty input - if input == "" { - return 0, nil - } - - // Find where the suffix begins - suffixLen := 0 - for i, chr := range []byte(input) { - _, err := strconv.Atoi(string([]byte{chr})) - if err != nil { - suffixLen = len(input) - i - break - } - } - - if suffixLen == len(input) { - return -1, fmt.Errorf("Invalid value: %s", input) - } - - // Extract the suffix - suffix := input[len(input)-suffixLen:] - - // Extract the value - value := input[0 : len(input)-suffixLen] - valueInt, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return -1, fmt.Errorf("Invalid integer: %s", input) - } - - // Figure out the multiplicator - multiplicator := int64(0) - switch suffix { - case "", "bit": - multiplicator = 1 - case "kbit": - multiplicator = 1000 - case "Mbit": - multiplicator = 1000 * 1000 - case "Gbit": - multiplicator = 1000 * 1000 * 1000 - case "Tbit": - multiplicator = 1000 * 1000 * 1000 * 1000 - case "Pbit": - multiplicator = 1000 * 1000 * 1000 * 1000 * 1000 - case "Ebit": - multiplicator = 1000 * 1000 * 1000 * 1000 * 1000 * 1000 - case "Kibit": - multiplicator = 1024 - case "Mibit": - multiplicator = 1024 * 1024 - case "Gibit": - multiplicator = 1024 * 1024 * 1024 - case "Tibit": - multiplicator = 1024 * 1024 * 1024 * 1024 - case "Pibit": - multiplicator = 1024 * 1024 * 1024 * 1024 * 1024 - case "Eibit": - multiplicator = 1024 * 1024 * 1024 * 1024 * 1024 * 1024 - - default: - return -1, 
fmt.Errorf("Unsupported suffix: %s", suffix) - } - - return handleOverflow(valueInt, multiplicator) -} - -// GetByteSizeString takes a number of bytes and precision and returns a -// human representation of the amount of data. -func GetByteSizeString(input int64, precision uint) string { - if input < 1000 { - return fmt.Sprintf("%dB", input) - } - - value := float64(input) - - for _, unit := range []string{"kB", "MB", "GB", "TB", "PB", "EB"} { - value = value / 1000 - if value < 1000 { - return fmt.Sprintf("%.*f%s", precision, value, unit) - } - } - - return fmt.Sprintf("%.*fEB", precision, value) -} - -// GetByteSizeStringIEC takes a number of bytes and precision and returns a -// human representation of the amount of data using IEC units. -func GetByteSizeStringIEC(input int64, precision uint) string { - if input < 1024 { - return fmt.Sprintf("%dB", input) - } - - value := float64(input) - - for _, unit := range []string{"KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} { - value = value / 1024 - if value < 1024 { - return fmt.Sprintf("%.*f%s", precision, value, unit) - } - } - - return fmt.Sprintf("%.*fEB", precision, value) -} diff --git a/vendor/github.com/lxc/lxd/shared/util.go b/vendor/github.com/lxc/lxd/shared/util.go deleted file mode 100644 index e1e0db79..00000000 --- a/vendor/github.com/lxc/lxd/shared/util.go +++ /dev/null @@ -1,1358 +0,0 @@ -package shared - -import ( - "bufio" - "bytes" - "context" - "crypto/rand" - "encoding/base64" - "encoding/gob" - "encoding/hex" - "encoding/json" - "fmt" - "hash" - "io" - "net/http" - "net/url" - "os" - "os/exec" - "path" - "path/filepath" - "reflect" - "regexp" - "runtime" - "strconv" - "strings" - "time" - - "github.com/flosch/pongo2" - - "github.com/lxc/lxd/lxd/revert" - "github.com/lxc/lxd/shared/api" - "github.com/lxc/lxd/shared/cancel" - "github.com/lxc/lxd/shared/ioprogress" - "github.com/lxc/lxd/shared/units" -) - -const SnapshotDelimiter = "/" -const HTTPSDefaultPort = 8443 -const HTTPDefaultPort = 8080 -const 
HTTPSMetricsDefaultPort = 9100 - -// HTTPSStorageBucketsDefaultPort the default port for the storage buckets listener. -const HTTPSStorageBucketsDefaultPort = 9000 - -// URLEncode encodes a path and query parameters to a URL. -func URLEncode(path string, query map[string]string) (string, error) { - u, err := url.Parse(path) - if err != nil { - return "", err - } - - params := url.Values{} - for key, value := range query { - params.Add(key, value) - } - - u.RawQuery = params.Encode() - return u.String(), nil -} - -// AddSlash adds a slash to the end of paths if they don't already have one. -// This can be useful for rsyncing things, since rsync has behavior present on -// the presence or absence of a trailing slash. -func AddSlash(path string) string { - if path[len(path)-1] != '/' { - return path + "/" - } - - return path -} - -func PathExists(name string) bool { - _, err := os.Lstat(name) - if err != nil && os.IsNotExist(err) { - return false - } - - return true -} - -// PathIsEmpty checks if the given path is empty. -func PathIsEmpty(path string) (bool, error) { - f, err := os.Open(path) - if err != nil { - return false, err - } - - defer func() { _ = f.Close() }() - - // read in ONLY one file - _, err = f.Readdir(1) - - // and if the file is EOF... well, the dir is empty. - if err == io.EOF { - return true, nil - } - - return false, err -} - -// IsDir returns true if the given path is a directory. -func IsDir(name string) bool { - stat, err := os.Stat(name) - if err != nil { - return false - } - - return stat.IsDir() -} - -// IsUnixSocket returns true if the given path is either a Unix socket -// or a symbolic link pointing at a Unix socket. -func IsUnixSocket(path string) bool { - stat, err := os.Stat(path) - if err != nil { - return false - } - - return (stat.Mode() & os.ModeSocket) == os.ModeSocket -} - -// HostPathFollow takes a valid path (from HostPath) and resolves it -// all the way to its target or to the last which can be resolved. 
-func HostPathFollow(path string) string { - // Ignore empty paths - if len(path) == 0 { - return path - } - - // Don't prefix stdin/stdout - if path == "-" { - return path - } - - // Check if we're running in a snap package. - if !InSnap() { - return path - } - - // Handle relative paths - if path[0] != os.PathSeparator { - // Use the cwd of the parent as snap-confine alters our own cwd on launch - ppid := os.Getppid() - if ppid < 1 { - return path - } - - pwd, err := os.Readlink(fmt.Sprintf("/proc/%d/cwd", ppid)) - if err != nil { - return path - } - - path = filepath.Clean(strings.Join([]string{pwd, path}, string(os.PathSeparator))) - } - - // Rely on "readlink -m" to do the right thing. - path = HostPath(path) - for { - target, err := RunCommand("readlink", "-m", path) - if err != nil { - return path - } - - target = strings.TrimSpace(target) - - if path == HostPath(target) { - return path - } - - path = HostPath(target) - } -} - -// HostPath returns the host path for the provided path -// On a normal system, this does nothing -// When inside of a snap environment, returns the real path. 
-func HostPath(path string) string { - // Ignore empty paths - if len(path) == 0 { - return path - } - - // Don't prefix stdin/stdout - if path == "-" { - return path - } - - // Check if we're running in a snap package - if !InSnap() { - return path - } - - // Handle relative paths - if path[0] != os.PathSeparator { - // Use the cwd of the parent as snap-confine alters our own cwd on launch - ppid := os.Getppid() - if ppid < 1 { - return path - } - - pwd, err := os.Readlink(fmt.Sprintf("/proc/%d/cwd", ppid)) - if err != nil { - return path - } - - path = filepath.Clean(strings.Join([]string{pwd, path}, string(os.PathSeparator))) - } - - // Check if the path is already snap-aware - for _, prefix := range []string{"/dev", "/snap", "/var/snap", "/var/lib/snapd"} { - if path == prefix || strings.HasPrefix(path, fmt.Sprintf("%s/", prefix)) { - return path - } - } - - return fmt.Sprintf("/var/lib/snapd/hostfs%s", path) -} - -// VarPath returns the provided path elements joined by a slash and -// appended to the end of $LXD_DIR, which defaults to /var/lib/lxd. -func VarPath(path ...string) string { - varDir := os.Getenv("LXD_DIR") - if varDir == "" { - varDir = "/var/lib/lxd" - } - - items := []string{varDir} - items = append(items, path...) - return filepath.Join(items...) -} - -// CachePath returns the directory that LXD should its cache under. If LXD_DIR is -// set, this path is $LXD_DIR/cache, otherwise it is /var/cache/lxd. -func CachePath(path ...string) string { - varDir := os.Getenv("LXD_DIR") - logDir := "/var/cache/lxd" - if varDir != "" { - logDir = filepath.Join(varDir, "cache") - } - - items := []string{logDir} - items = append(items, path...) - return filepath.Join(items...) -} - -// LogPath returns the directory that LXD should put logs under. If LXD_DIR is -// set, this path is $LXD_DIR/logs, otherwise it is /var/log/lxd. 
-func LogPath(path ...string) string { - varDir := os.Getenv("LXD_DIR") - logDir := "/var/log/lxd" - if varDir != "" { - logDir = filepath.Join(varDir, "logs") - } - - items := []string{logDir} - items = append(items, path...) - return filepath.Join(items...) -} - -func ParseLXDFileHeaders(headers http.Header) (uid int64, gid int64, mode int, type_ string, write string) { - uid, err := strconv.ParseInt(headers.Get("X-LXD-uid"), 10, 64) - if err != nil { - uid = -1 - } - - gid, err = strconv.ParseInt(headers.Get("X-LXD-gid"), 10, 64) - if err != nil { - gid = -1 - } - - mode, err = strconv.Atoi(headers.Get("X-LXD-mode")) - if err != nil { - mode = -1 - } else { - rawMode, err := strconv.ParseInt(headers.Get("X-LXD-mode"), 0, 0) - if err == nil { - mode = int(os.FileMode(rawMode) & os.ModePerm) - } - } - - type_ = headers.Get("X-LXD-type") - /* backwards compat: before "type" was introduced, we could only - * manipulate files - */ - if type_ == "" { - type_ = "file" - } - - write = headers.Get("X-LXD-write") - /* backwards compat: before "write" was introduced, we could only - * overwrite files - */ - if write == "" { - write = "overwrite" - } - - return uid, gid, mode, type_, write -} - -func ReaderToChannel(r io.Reader, bufferSize int) <-chan []byte { - if bufferSize <= 128*1024 { - bufferSize = 128 * 1024 - } - - ch := make(chan ([]byte)) - - go func() { - readSize := 128 * 1024 - offset := 0 - buf := make([]byte, bufferSize) - - for { - read := buf[offset : offset+readSize] - nr, err := r.Read(read) - offset += nr - if offset > 0 && (offset+readSize >= bufferSize || err != nil) { - ch <- buf[0:offset] - offset = 0 - buf = make([]byte, bufferSize) - } - - if err != nil { - close(ch) - break - } - } - }() - - return ch -} - -// Returns a random base64 encoded string from crypto/rand. 
-func RandomCryptoString() (string, error) { - buf := make([]byte, 32) - n, err := rand.Read(buf) - if err != nil { - return "", err - } - - if n != len(buf) { - return "", fmt.Errorf("not enough random bytes read") - } - - return hex.EncodeToString(buf), nil -} - -func AtoiEmptyDefault(s string, def int) (int, error) { - if s == "" { - return def, nil - } - - return strconv.Atoi(s) -} - -func ReadStdin() ([]byte, error) { - buf := bufio.NewReader(os.Stdin) - line, _, err := buf.ReadLine() - if err != nil { - return nil, err - } - - return line, nil -} - -func WriteAll(w io.Writer, data []byte) error { - buf := bytes.NewBuffer(data) - - toWrite := int64(buf.Len()) - for { - n, err := io.Copy(w, buf) - if err != nil { - return err - } - - toWrite -= n - if toWrite <= 0 { - return nil - } - } -} - -// QuotaWriter returns an error once a given write quota gets exceeded. -type QuotaWriter struct { - writer io.Writer - quota int64 - n int64 -} - -// NewQuotaWriter returns a new QuotaWriter wrapping the given writer. -// -// If the given quota is negative, then no quota is applied. -func NewQuotaWriter(writer io.Writer, quota int64) *QuotaWriter { - return &QuotaWriter{ - writer: writer, - quota: quota, - } -} - -// Write implements the Writer interface. -func (w *QuotaWriter) Write(p []byte) (n int, err error) { - if w.quota >= 0 { - w.n += int64(len(p)) - if w.n > w.quota { - return 0, fmt.Errorf("reached %d bytes, exceeding quota of %d", w.n, w.quota) - } - } - return w.writer.Write(p) -} - -// FileMove tries to move a file by using os.Rename, -// if that fails it tries to copy the file and remove the source. -func FileMove(oldPath string, newPath string) error { - err := os.Rename(oldPath, newPath) - if err == nil { - return nil - } - - err = FileCopy(oldPath, newPath) - if err != nil { - return err - } - - _ = os.Remove(oldPath) - - return nil -} - -// FileCopy copies a file, overwriting the target if it exists. 
-func FileCopy(source string, dest string) error { - fi, err := os.Lstat(source) - if err != nil { - return err - } - - _, uid, gid := GetOwnerMode(fi) - - if fi.Mode()&os.ModeSymlink != 0 { - target, err := os.Readlink(source) - if err != nil { - return err - } - - if PathExists(dest) { - err = os.Remove(dest) - if err != nil { - return err - } - } - - err = os.Symlink(target, dest) - if err != nil { - return err - } - - if runtime.GOOS != "windows" { - return os.Lchown(dest, uid, gid) - } - - return nil - } - - s, err := os.Open(source) - if err != nil { - return err - } - - defer func() { _ = s.Close() }() - - d, err := os.Create(dest) - if err != nil { - if os.IsExist(err) { - d, err = os.OpenFile(dest, os.O_WRONLY, fi.Mode()) - if err != nil { - return err - } - } else { - return err - } - } - - _, err = io.Copy(d, s) - if err != nil { - return err - } - - /* chown not supported on windows */ - if runtime.GOOS != "windows" { - err = d.Chown(uid, gid) - if err != nil { - return err - } - } - - return d.Close() -} - -// DirCopy copies a directory recursively, overwriting the target if it exists. -func DirCopy(source string, dest string) error { - // Get info about source. - info, err := os.Stat(source) - if err != nil { - return fmt.Errorf("failed to get source directory info: %w", err) - } - - if !info.IsDir() { - return fmt.Errorf("source is not a directory") - } - - // Remove dest if it already exists. - if PathExists(dest) { - err := os.RemoveAll(dest) - if err != nil { - return fmt.Errorf("failed to remove destination directory %s: %w", dest, err) - } - } - - // Create dest. - err = os.MkdirAll(dest, info.Mode()) - if err != nil { - return fmt.Errorf("failed to create destination directory %s: %w", dest, err) - } - - // Copy all files. 
- entries, err := os.ReadDir(source) - if err != nil { - return fmt.Errorf("failed to read source directory %s: %w", source, err) - } - - for _, entry := range entries { - sourcePath := filepath.Join(source, entry.Name()) - destPath := filepath.Join(dest, entry.Name()) - - if entry.IsDir() { - err := DirCopy(sourcePath, destPath) - if err != nil { - return fmt.Errorf("failed to copy sub-directory from %s to %s: %w", sourcePath, destPath, err) - } - } else { - err := FileCopy(sourcePath, destPath) - if err != nil { - return fmt.Errorf("failed to copy file from %s to %s: %w", sourcePath, destPath, err) - } - } - } - - return nil -} - -type BytesReadCloser struct { - Buf *bytes.Buffer -} - -func (r BytesReadCloser) Read(b []byte) (n int, err error) { - return r.Buf.Read(b) -} - -func (r BytesReadCloser) Close() error { - /* no-op since we're in memory */ - return nil -} - -func IsSnapshot(name string) bool { - return strings.Contains(name, SnapshotDelimiter) -} - -func MkdirAllOwner(path string, perm os.FileMode, uid int, gid int) error { - // This function is a slightly modified version of MkdirAll from the Go standard library. - // https://golang.org/src/os/path.go?s=488:535#L9 - - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - - return fmt.Errorf("path exists but isn't a directory") - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = MkdirAllOwner(path[0:j-1], perm, uid, gid) - if err != nil { - return err - } - } - - // Parent now exists; invoke Mkdir and use its result. 
- err = os.Mkdir(path, perm) - - err_chown := os.Chown(path, uid, gid) - if err_chown != nil { - return err_chown - } - - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - - return err - } - - return nil -} - -// HasKey returns true if map has key. -func HasKey[K comparable, V any](key K, m map[K]V) bool { - _, found := m[key] - - return found -} - -func StringInSlice(key string, list []string) bool { - for _, entry := range list { - if entry == key { - return true - } - } - return false -} - -// RemoveElementsFromStringSlice returns a slice equivalent to removing the given elements from the given list. -// Elements not present in the list are ignored. -func RemoveElementsFromStringSlice(list []string, elements ...string) []string { - for i := len(elements) - 1; i >= 0; i-- { - element := elements[i] - match := false - for j := len(list) - 1; j >= 0; j-- { - if element == list[j] { - match = true - list = append(list[:j], list[j+1:]...) - break - } - } - - if match { - elements = append(elements[:i], elements[i+1:]...) - } - } - - return list -} - -// StringHasPrefix returns true if value has one of the supplied prefixes. -func StringHasPrefix(value string, prefixes ...string) bool { - for _, prefix := range prefixes { - if strings.HasPrefix(value, prefix) { - return true - } - } - return false -} - -func IntInSlice(key int, list []int) bool { - for _, entry := range list { - if entry == key { - return true - } - } - return false -} - -func Int64InSlice(key int64, list []int64) bool { - for _, entry := range list { - if entry == key { - return true - } - } - return false -} - -func Uint64InSlice(key uint64, list []uint64) bool { - for _, entry := range list { - if entry == key { - return true - } - } - return false -} - -// IsTrue returns true if value is "true", "1", "yes" or "on" (case insensitive). 
-func IsTrue(value string) bool { - return StringInSlice(strings.ToLower(value), []string{"true", "1", "yes", "on"}) -} - -// IsTrueOrEmpty returns true if value is empty or if IsTrue() returns true. -func IsTrueOrEmpty(value string) bool { - return value == "" || IsTrue(value) -} - -// IsFalse returns true if value is "false", "0", "no" or "off" (case insensitive). -func IsFalse(value string) bool { - return StringInSlice(strings.ToLower(value), []string{"false", "0", "no", "off"}) -} - -// IsFalseOrEmpty returns true if value is empty or if IsFalse() returns true. -func IsFalseOrEmpty(value string) bool { - return value == "" || IsFalse(value) -} - -func IsUserConfig(key string) bool { - return strings.HasPrefix(key, "user.") -} - -// StringMapHasStringKey returns true if any of the supplied keys are present in the map. -func StringMapHasStringKey(m map[string]string, keys ...string) bool { - for _, k := range keys { - _, ok := m[k] - if ok { - return true - } - } - - return false -} - -func IsBlockdev(fm os.FileMode) bool { - return ((fm&os.ModeDevice != 0) && (fm&os.ModeCharDevice == 0)) -} - -func IsBlockdevPath(pathName string) bool { - sb, err := os.Stat(pathName) - if err != nil { - return false - } - - fm := sb.Mode() - return ((fm&os.ModeDevice != 0) && (fm&os.ModeCharDevice == 0)) -} - -// DeepCopy copies src to dest by using encoding/gob so its not that fast. 
-func DeepCopy(src, dest any) error { - buff := new(bytes.Buffer) - enc := gob.NewEncoder(buff) - dec := gob.NewDecoder(buff) - err := enc.Encode(src) - if err != nil { - return err - } - - err = dec.Decode(dest) - if err != nil { - return err - } - - return nil -} - -func RunningInUserNS() bool { - file, err := os.Open("/proc/self/uid_map") - if err != nil { - return false - } - - defer func() { _ = file.Close() }() - - buf := bufio.NewReader(file) - l, _, err := buf.ReadLine() - if err != nil { - return false - } - - line := string(l) - var a, b, c int64 - _, _ = fmt.Sscanf(line, "%d %d %d", &a, &b, &c) - if a == 0 && b == 0 && c == 4294967295 { - return false - } - - return true -} - -// Spawn the editor with a temporary YAML file for editing configs. -func TextEditor(inPath string, inContent []byte) ([]byte, error) { - var f *os.File - var err error - var path string - - // Detect the text editor to use - editor := os.Getenv("VISUAL") - if editor == "" { - editor = os.Getenv("EDITOR") - if editor == "" { - for _, p := range []string{"editor", "vi", "emacs", "nano"} { - _, err := exec.LookPath(p) - if err == nil { - editor = p - break - } - } - if editor == "" { - return []byte{}, fmt.Errorf("No text editor found, please set the EDITOR environment variable") - } - } - } - - if inPath == "" { - // If provided input, create a new file - f, err = os.CreateTemp("", "lxd_editor_") - if err != nil { - return []byte{}, err - } - - revert := revert.New() - defer revert.Fail() - revert.Add(func() { - _ = f.Close() - _ = os.Remove(f.Name()) - }) - - err = os.Chmod(f.Name(), 0600) - if err != nil { - return []byte{}, err - } - - _, err = f.Write(inContent) - if err != nil { - return []byte{}, err - } - - err = f.Close() - if err != nil { - return []byte{}, err - } - - path = fmt.Sprintf("%s.yaml", f.Name()) - err = os.Rename(f.Name(), path) - if err != nil { - return []byte{}, err - } - - revert.Success() - revert.Add(func() { _ = os.Remove(path) }) - } else { - path = 
inPath - } - - cmdParts := strings.Fields(editor) - cmd := exec.Command(cmdParts[0], append(cmdParts[1:], path)...) - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err = cmd.Run() - if err != nil { - return []byte{}, err - } - - content, err := os.ReadFile(path) - if err != nil { - return []byte{}, err - } - - return content, nil -} - -func ParseMetadata(metadata any) (map[string]any, error) { - newMetadata := make(map[string]any) - s := reflect.ValueOf(metadata) - if !s.IsValid() { - return nil, nil - } - - if s.Kind() == reflect.Map { - for _, k := range s.MapKeys() { - if k.Kind() != reflect.String { - return nil, fmt.Errorf("Invalid metadata provided (key isn't a string)") - } - - newMetadata[k.String()] = s.MapIndex(k).Interface() - } - } else if s.Kind() == reflect.Ptr && !s.Elem().IsValid() { - return nil, nil - } else { - return nil, fmt.Errorf("Invalid metadata provided (type isn't a map)") - } - - return newMetadata, nil -} - -// RemoveDuplicatesFromString removes all duplicates of the string 'sep' -// from the specified string 's'. Leading and trailing occurrences of sep -// are NOT removed (duplicate leading/trailing are). Performs poorly if -// there are multiple consecutive redundant separators. -func RemoveDuplicatesFromString(s string, sep string) string { - dup := sep + sep - for s = strings.Replace(s, dup, sep, -1); strings.Contains(s, dup); s = strings.Replace(s, dup, sep, -1) { - - } - - return s -} - -// RunError is the error from the RunCommand family of functions. 
-type RunError struct { - cmd string - args []string - err error - stdout *bytes.Buffer - stderr *bytes.Buffer -} - -func (e RunError) Error() string { - if e.stderr.Len() == 0 { - return fmt.Sprintf("Failed to run: %s %s: %v", e.cmd, strings.Join(e.args, " "), e.err) - } - - return fmt.Sprintf("Failed to run: %s %s: %v (%s)", e.cmd, strings.Join(e.args, " "), e.err, strings.TrimSpace(e.stderr.String())) -} - -func (e RunError) Unwrap() error { - return e.err -} - -// StdOut returns the stdout buffer. -func (e RunError) StdOut() *bytes.Buffer { - return e.stdout -} - -// StdErr returns the stdout buffer. -func (e RunError) StdErr() *bytes.Buffer { - return e.stderr -} - -// NewRunError returns new RunError. -func NewRunError(cmd string, args []string, err error, stdout *bytes.Buffer, stderr *bytes.Buffer) error { - return RunError{ - cmd: cmd, - args: args, - err: err, - stdout: stdout, - stderr: stderr, - } -} - -// RunCommandSplit runs a command with a supplied environment and optional arguments and returns the -// resulting stdout and stderr output as separate variables. If the supplied environment is nil then -// the default environment is used. If the command fails to start or returns a non-zero exit code -// then an error is returned containing the output of stderr too. -func RunCommandSplit(ctx context.Context, env []string, filesInherit []*os.File, name string, arg ...string) (string, string, error) { - cmd := exec.CommandContext(ctx, name, arg...) - - if env != nil { - cmd.Env = env - } - - if filesInherit != nil { - cmd.ExtraFiles = filesInherit - } - - var stdout bytes.Buffer - var stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - err := cmd.Run() - if err != nil { - return stdout.String(), stderr.String(), NewRunError(name, arg, err, &stdout, &stderr) - } - - return stdout.String(), stderr.String(), nil -} - -// RunCommandContext runs a command with optional arguments and returns stdout. 
If the command fails to -// start or returns a non-zero exit code then an error is returned containing the output of stderr. -func RunCommandContext(ctx context.Context, name string, arg ...string) (string, error) { - stdout, _, err := RunCommandSplit(ctx, nil, nil, name, arg...) - return stdout, err -} - -// RunCommand runs a command with optional arguments and returns stdout. If the command fails to -// start or returns a non-zero exit code then an error is returned containing the output of stderr. -// Deprecated: Use RunCommandContext. -func RunCommand(name string, arg ...string) (string, error) { - stdout, _, err := RunCommandSplit(context.TODO(), nil, nil, name, arg...) - return stdout, err -} - -// RunCommandInheritFds runs a command with optional arguments and passes a set -// of file descriptors to the newly created process, returning stdout. If the -// command fails to start or returns a non-zero exit code then an error is -// returned containing the output of stderr. -func RunCommandInheritFds(ctx context.Context, filesInherit []*os.File, name string, arg ...string) (string, error) { - stdout, _, err := RunCommandSplit(ctx, nil, filesInherit, name, arg...) - return stdout, err -} - -// RunCommandCLocale runs a command with a LANG=C.UTF-8 and LANGUAGE=en environment set with optional arguments and -// returns stdout. If the command fails to start or returns a non-zero exit code then an error is -// returned containing the output of stderr. -func RunCommandCLocale(name string, arg ...string) (string, error) { - stdout, _, err := RunCommandSplit(context.TODO(), append(os.Environ(), "LANG=C.UTF-8", "LANGUAGE=en"), nil, name, arg...) - return stdout, err -} - -// RunCommandWithFds runs a command with supplied file descriptors. -func RunCommandWithFds(ctx context.Context, stdin io.Reader, stdout io.Writer, name string, arg ...string) error { - cmd := exec.CommandContext(ctx, name, arg...) 
- - if stdin != nil { - cmd.Stdin = stdin - } - - if stdout != nil { - cmd.Stdout = stdout - } - - var buffer bytes.Buffer - cmd.Stderr = &buffer - - err := cmd.Run() - if err != nil { - return NewRunError(name, arg, err, nil, &buffer) - } - - return nil -} - -// TryRunCommand runs the specified command up to 20 times with a 500ms delay between each call -// until it runs without an error. If after 20 times it is still failing then returns the error. -func TryRunCommand(name string, arg ...string) (string, error) { - var err error - var output string - - for i := 0; i < 20; i++ { - output, err = RunCommand(name, arg...) - if err == nil { - break - } - - time.Sleep(500 * time.Millisecond) - } - - return output, err -} - -func TimeIsSet(ts time.Time) bool { - if ts.Unix() <= 0 { - return false - } - - if ts.UTC().Unix() <= 0 { - return false - } - - return true -} - -// EscapePathFstab escapes a path fstab-style. -// This ensures that getmntent_r() and friends can correctly parse stuff like -// /some/wacky path with spaces /some/wacky target with spaces. -func EscapePathFstab(path string) string { - r := strings.NewReplacer( - " ", "\\040", - "\t", "\\011", - "\n", "\\012", - "\\", "\\\\") - return r.Replace(path) -} - -func SetProgressMetadata(metadata map[string]any, stage, displayPrefix string, percent, processed, speed int64) { - progress := make(map[string]string) - // stage, percent, speed sent for API callers. - progress["stage"] = stage - if processed > 0 { - progress["processed"] = strconv.FormatInt(processed, 10) - } - - if percent > 0 { - progress["percent"] = strconv.FormatInt(percent, 10) - } - - progress["speed"] = strconv.FormatInt(speed, 10) - metadata["progress"] = progress - - // _progress with formatted text sent for lxc cli. 
- if percent > 0 { - metadata[stage+"_progress"] = fmt.Sprintf("%s: %d%% (%s/s)", displayPrefix, percent, units.GetByteSizeString(speed, 2)) - } else if processed > 0 { - metadata[stage+"_progress"] = fmt.Sprintf("%s: %s (%s/s)", displayPrefix, units.GetByteSizeString(processed, 2), units.GetByteSizeString(speed, 2)) - } else { - metadata[stage+"_progress"] = fmt.Sprintf("%s: %s/s", displayPrefix, units.GetByteSizeString(speed, 2)) - } -} - -func DownloadFileHash(ctx context.Context, httpClient *http.Client, useragent string, progress func(progress ioprogress.ProgressData), canceler *cancel.HTTPRequestCanceller, filename string, url string, hash string, hashFunc hash.Hash, target io.WriteSeeker) (int64, error) { - // Always seek to the beginning - _, _ = target.Seek(0, 0) - - var req *http.Request - var err error - - // Prepare the download request - if ctx != nil { - req, err = http.NewRequestWithContext(ctx, "GET", url, nil) - } else { - req, err = http.NewRequest("GET", url, nil) - } - - if err != nil { - return -1, err - } - - if useragent != "" { - req.Header.Set("User-Agent", useragent) - } - - // Perform the request - r, doneCh, err := cancel.CancelableDownload(canceler, httpClient, req) - if err != nil { - return -1, err - } - - defer func() { _ = r.Body.Close() }() - defer close(doneCh) - - if r.StatusCode != http.StatusOK { - return -1, fmt.Errorf("Unable to fetch %s: %s", url, r.Status) - } - - // Handle the data - body := r.Body - if progress != nil { - body = &ioprogress.ProgressReader{ - ReadCloser: r.Body, - Tracker: &ioprogress.ProgressTracker{ - Length: r.ContentLength, - Handler: func(percent int64, speed int64) { - if filename != "" { - progress(ioprogress.ProgressData{Text: fmt.Sprintf("%s: %d%% (%s/s)", filename, percent, units.GetByteSizeString(speed, 2))}) - } else { - progress(ioprogress.ProgressData{Text: fmt.Sprintf("%d%% (%s/s)", percent, units.GetByteSizeString(speed, 2))}) - } - }, - }, - } - } - - var size int64 - - if hashFunc != nil 
{ - size, err = io.Copy(io.MultiWriter(target, hashFunc), body) - if err != nil { - return -1, err - } - - result := fmt.Sprintf("%x", hashFunc.Sum(nil)) - if result != hash { - return -1, fmt.Errorf("Hash mismatch for %s: %s != %s", url, result, hash) - } - } else { - size, err = io.Copy(target, body) - if err != nil { - return -1, err - } - } - - return size, nil -} - -func ParseNumberFromFile(file string) (int64, error) { - f, err := os.Open(file) - if err != nil { - return int64(0), err - } - - defer func() { _ = f.Close() }() - - buf := make([]byte, 4096) - n, err := f.Read(buf) - if err != nil { - return int64(0), err - } - - str := strings.TrimSpace(string(buf[0:n])) - nr, err := strconv.Atoi(str) - if err != nil { - return int64(0), err - } - - return int64(nr), nil -} - -type ReadSeeker struct { - io.Reader - io.Seeker -} - -func NewReadSeeker(reader io.Reader, seeker io.Seeker) *ReadSeeker { - return &ReadSeeker{Reader: reader, Seeker: seeker} -} - -func (r *ReadSeeker) Read(p []byte) (n int, err error) { - return r.Reader.Read(p) -} - -func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) { - return r.Seeker.Seek(offset, whence) -} - -// RenderTemplate renders a pongo2 template. -func RenderTemplate(template string, ctx pongo2.Context) (string, error) { - // Load template from string - tpl, err := pongo2.FromString("{% autoescape off %}" + template + "{% endautoescape %}") - if err != nil { - return "", err - } - - // Get rendered template - ret, err := tpl.Execute(ctx) - if err != nil { - return ret, err - } - - // Looks like we're nesting templates so run pongo again - if strings.Contains(ret, "{{") || strings.Contains(ret, "{%") { - return RenderTemplate(ret, ctx) - } - - return ret, err -} - -// GetExpiry returns the expiry date based on the reference date and a length of time. -// The length of time format is "(S|M|H|d|w|m|y)", and can contain multiple such fields, e.g. -// "1d 3H" (1 day and 3 hours). 
-func GetExpiry(refDate time.Time, s string) (time.Time, error) { - expr := strings.TrimSpace(s) - - if expr == "" { - return time.Time{}, nil - } - - re := regexp.MustCompile(`^(\d+)(S|M|H|d|w|m|y)$`) - expiry := map[string]int{ - "S": 0, - "M": 0, - "H": 0, - "d": 0, - "w": 0, - "m": 0, - "y": 0, - } - - values := strings.Split(expr, " ") - - if len(values) == 0 { - return time.Time{}, nil - } - - for _, value := range values { - fields := re.FindStringSubmatch(value) - if fields == nil { - return time.Time{}, fmt.Errorf("Invalid expiry expression") - } - - if expiry[fields[2]] > 0 { - // We don't allow fields to be set multiple times - return time.Time{}, fmt.Errorf("Invalid expiry expression") - } - - val, err := strconv.Atoi(fields[1]) - if err != nil { - return time.Time{}, err - } - - expiry[fields[2]] = val - } - - t := refDate.AddDate(expiry["y"], expiry["m"], expiry["d"]+expiry["w"]*7).Add( - time.Hour*time.Duration(expiry["H"]) + time.Minute*time.Duration(expiry["M"]) + time.Second*time.Duration(expiry["S"])) - - return t, nil -} - -// InSnap returns true if we're running inside the LXD snap. -func InSnap() bool { - // Detect the snap. - _, snapPath := os.LookupEnv("SNAP") - snapName := os.Getenv("SNAP_NAME") - if snapPath && snapName == "lxd" { - return true - } - - return false -} - -// JoinUrlPath return the join of the input urls/paths sanitized. -func JoinUrls(baseUrl, p string) (string, error) { - u, err := url.Parse(baseUrl) - if err != nil { - return "", err - } - - u.Path = path.Join(u.Path, p) - return u.String(), nil -} - -// SplitNTrimSpace returns result of strings.SplitN() and then strings.TrimSpace() on each element. -// Accepts nilIfEmpty argument which if true, will return nil slice if s is empty (after trimming space). 
-func SplitNTrimSpace(s string, sep string, n int, nilIfEmpty bool) []string { - if nilIfEmpty && strings.TrimSpace(s) == "" { - return nil - } - - parts := strings.SplitN(s, sep, n) - - for i, v := range parts { - parts[i] = strings.TrimSpace(v) - } - - return parts -} - -// JoinTokenDecode decodes a base64 and JSON encode join token. -func JoinTokenDecode(input string) (*api.ClusterMemberJoinToken, error) { - joinTokenJSON, err := base64.StdEncoding.DecodeString(input) - if err != nil { - return nil, err - } - - var j api.ClusterMemberJoinToken - err = json.Unmarshal(joinTokenJSON, &j) - if err != nil { - return nil, err - } - - if j.ServerName == "" { - return nil, fmt.Errorf("No server name in join token") - } - - if len(j.Addresses) < 1 { - return nil, fmt.Errorf("No cluster member addresses in join token") - } - - if j.Secret == "" { - return nil, fmt.Errorf("No secret in join token") - } - - if j.Fingerprint == "" { - return nil, fmt.Errorf("No certificate fingerprint in join token") - } - - return &j, nil -} diff --git a/vendor/github.com/lxc/lxd/shared/util_linux.go b/vendor/github.com/lxc/lxd/shared/util_linux.go deleted file mode 100644 index 071f060c..00000000 --- a/vendor/github.com/lxc/lxd/shared/util_linux.go +++ /dev/null @@ -1,607 +0,0 @@ -//go:build linux - -package shared - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "reflect" - "strings" - "sync/atomic" - "unsafe" - - "github.com/pkg/xattr" - "golang.org/x/sys/unix" - - "github.com/lxc/lxd/lxd/revert" - "github.com/lxc/lxd/shared/logger" - "github.com/lxc/lxd/shared/units" -) - -// --- pure Go functions --- - -func GetFileStat(p string) (uid int, gid int, major uint32, minor uint32, inode uint64, nlink int, err error) { - var stat unix.Stat_t - err = unix.Lstat(p, &stat) - if err != nil { - return - } - - uid = int(stat.Uid) - gid = int(stat.Gid) - inode = uint64(stat.Ino) - nlink = int(stat.Nlink) - if stat.Mode&unix.S_IFBLK != 0 || 
stat.Mode&unix.S_IFCHR != 0 { - major = unix.Major(uint64(stat.Rdev)) - minor = unix.Minor(uint64(stat.Rdev)) - } - - return -} - -// GetPathMode returns a os.FileMode for the provided path. -func GetPathMode(path string) (os.FileMode, error) { - fi, err := os.Stat(path) - if err != nil { - return os.FileMode(0000), err - } - - mode, _, _ := GetOwnerMode(fi) - return mode, nil -} - -func SetSize(fd int, width int, height int) (err error) { - var dimensions [4]uint16 - dimensions[0] = uint16(height) - dimensions[1] = uint16(width) - - _, _, errno := unix.Syscall6(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.TIOCSWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0) - if errno != 0 { - return errno - } - - return nil -} - -// GetAllXattr retrieves all extended attributes associated with a file, directory or symbolic link. -func GetAllXattr(path string) (map[string]string, error) { - xattrNames, err := xattr.LList(path) - if err != nil { - // Some filesystems don't support llistxattr() for various reasons. - // Interpret this as a set of no xattrs, instead of an error. 
- if errors.Is(err, unix.EOPNOTSUPP) { - return nil, nil - } - - return nil, fmt.Errorf("Failed getting extended attributes from %q: %w", path, err) - } - - var xattrs = make(map[string]string, len(xattrNames)) - for _, xattrName := range xattrNames { - value, err := xattr.LGet(path, xattrName) - if err != nil { - return nil, fmt.Errorf("Failed getting %q extended attribute from %q: %w", xattrName, path, err) - } - - xattrs[xattrName] = string(value) - } - - return xattrs, nil -} - -var ObjectFound = fmt.Errorf("Found requested object") - -func LookupUUIDByBlockDevPath(diskDevice string) (string, error) { - uuid := "" - readUUID := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if (info.Mode() & os.ModeSymlink) == os.ModeSymlink { - link, err := os.Readlink(path) - if err != nil { - return err - } - - // filepath.Join() will call Clean() on the result and - // thus resolve those ugly "../../" parts that make it - // hard to compare the strings. - absPath := filepath.Join("/dev/disk/by-uuid", link) - if absPath == diskDevice { - uuid = path - // Will allows us to avoid needlessly travers - // the whole directory. - return ObjectFound - } - } - return nil - } - - err := filepath.Walk("/dev/disk/by-uuid", readUUID) - if err != nil && err != ObjectFound { - return "", fmt.Errorf("Failed to detect UUID: %s", err) - } - - if uuid == "" { - return "", fmt.Errorf("Failed to detect UUID") - } - - lastSlash := strings.LastIndex(uuid, "/") - return uuid[lastSlash+1:], nil -} - -// Detect whether err is an errno. -func GetErrno(err error) (errno error, iserrno bool) { - sysErr, ok := err.(*os.SyscallError) - if ok { - return sysErr.Err, true - } - - pathErr, ok := err.(*os.PathError) - if ok { - return pathErr.Err, true - } - - tmpErrno, ok := err.(unix.Errno) - if ok { - return tmpErrno, true - } - - return nil, false -} - -// Utsname returns the same info as unix.Utsname, as strings. 
-type Utsname struct { - Sysname string - Nodename string - Release string - Version string - Machine string - Domainname string -} - -// Uname returns Utsname as strings. -func Uname() (*Utsname, error) { - /* - * Based on: https://groups.google.com/forum/#!topic/golang-nuts/Jel8Bb-YwX8 - * there is really no better way to do this, which is - * unfortunate. Also, we ditch the more accepted CharsToString - * version in that thread, since it doesn't seem as portable, - * viz. github issue #206. - */ - - uname := unix.Utsname{} - err := unix.Uname(&uname) - if err != nil { - return nil, err - } - - return &Utsname{ - Sysname: intArrayToString(uname.Sysname), - Nodename: intArrayToString(uname.Nodename), - Release: intArrayToString(uname.Release), - Version: intArrayToString(uname.Version), - Machine: intArrayToString(uname.Machine), - Domainname: intArrayToString(uname.Domainname), - }, nil -} - -func intArrayToString(arr any) string { - slice := reflect.ValueOf(arr) - s := "" - for i := 0; i < slice.Len(); i++ { - val := slice.Index(i) - valInt := int64(-1) - - switch val.Kind() { - case reflect.Int: - case reflect.Int8: - valInt = int64(val.Int()) - case reflect.Uint: - case reflect.Uint8: - valInt = int64(val.Uint()) - default: - continue - } - - if valInt == 0 { - break - } - - s += string(byte(valInt)) - } - - return s -} - -func DeviceTotalMemory() (int64, error) { - return GetMeminfo("MemTotal") -} - -func GetMeminfo(field string) (int64, error) { - // Open /proc/meminfo - f, err := os.Open("/proc/meminfo") - if err != nil { - return -1, err - } - - defer func() { _ = f.Close() }() - - // Read it line by line - scan := bufio.NewScanner(f) - for scan.Scan() { - line := scan.Text() - - // We only care about MemTotal - if !strings.HasPrefix(line, field+":") { - continue - } - - // Extract the before last (value) and last (unit) fields - fields := strings.Split(line, " ") - value := fields[len(fields)-2] + fields[len(fields)-1] - - // Feed the result to 
units.ParseByteSizeString to get an int value - valueBytes, err := units.ParseByteSizeString(value) - if err != nil { - return -1, err - } - - return valueBytes, nil - } - - return -1, fmt.Errorf("Couldn't find %s", field) -} - -// OpenPtyInDevpts creates a new PTS pair, configures them and returns them. -func OpenPtyInDevpts(devpts_fd int, uid, gid int64) (*os.File, *os.File, error) { - revert := revert.New() - defer revert.Fail() - var fd int - var ptx *os.File - var err error - - // Create a PTS pair. - if devpts_fd >= 0 { - fd, err = unix.Openat(devpts_fd, "ptmx", unix.O_RDWR|unix.O_CLOEXEC|unix.O_NOCTTY, 0) - } else { - fd, err = unix.Openat(-1, "/dev/ptmx", unix.O_RDWR|unix.O_CLOEXEC|unix.O_NOCTTY, 0) - } - - if err != nil { - return nil, nil, err - } - - ptx = os.NewFile(uintptr(fd), "/dev/pts/ptmx") - revert.Add(func() { _ = ptx.Close() }) - - // Unlock the ptx and pty. - val := 0 - _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(ptx.Fd()), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&val))) - if errno != 0 { - return nil, nil, unix.Errno(errno) - } - - var pty *os.File - ptyFd, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(ptx.Fd()), unix.TIOCGPTPEER, uintptr(unix.O_NOCTTY|unix.O_CLOEXEC|os.O_RDWR)) - // We can only fallback to looking up the fd in /dev/pts when we aren't dealing with the container's devpts instance. - if errno == 0 { - // Get the pty side. - id := 0 - _, _, errno = unix.Syscall(unix.SYS_IOCTL, uintptr(ptx.Fd()), unix.TIOCGPTN, uintptr(unsafe.Pointer(&id))) - if errno != 0 { - return nil, nil, unix.Errno(errno) - } - - pty = os.NewFile(ptyFd, fmt.Sprintf("/dev/pts/%d", id)) - } else { - if devpts_fd >= 0 { - return nil, nil, fmt.Errorf("TIOCGPTPEER required but not available") - } - - // Get the pty side. - id := 0 - _, _, errno = unix.Syscall(unix.SYS_IOCTL, uintptr(ptx.Fd()), unix.TIOCGPTN, uintptr(unsafe.Pointer(&id))) - if errno != 0 { - return nil, nil, unix.Errno(errno) - } - - // Open the pty. 
- pty, err = os.OpenFile(fmt.Sprintf("/dev/pts/%d", id), unix.O_NOCTTY|unix.O_CLOEXEC|os.O_RDWR, 0) - if err != nil { - return nil, nil, err - } - } - revert.Add(func() { _ = pty.Close() }) - - // Configure both sides - for _, entry := range []*os.File{ptx, pty} { - // Get termios. - t, err := unix.IoctlGetTermios(int(entry.Fd()), unix.TCGETS) - if err != nil { - return nil, nil, err - } - - // Set flags. - t.Cflag |= unix.IMAXBEL - t.Cflag |= unix.IUTF8 - t.Cflag |= unix.BRKINT - t.Cflag |= unix.IXANY - t.Cflag |= unix.HUPCL - - // Set termios. - err = unix.IoctlSetTermios(int(entry.Fd()), unix.TCSETS, t) - if err != nil { - return nil, nil, err - } - - // Set the default window size. - sz := &unix.Winsize{ - Col: 80, - Row: 25, - } - - err = unix.IoctlSetWinsize(int(entry.Fd()), unix.TIOCSWINSZ, sz) - if err != nil { - return nil, nil, err - } - - // Set CLOEXEC. - _, _, errno = unix.Syscall(unix.SYS_FCNTL, uintptr(entry.Fd()), unix.F_SETFD, unix.FD_CLOEXEC) - if errno != 0 { - return nil, nil, unix.Errno(errno) - } - } - - // Fix the ownership of the pty side. - err = unix.Fchown(int(pty.Fd()), int(uid), int(gid)) - if err != nil { - return nil, nil, err - } - - revert.Success() - return ptx, pty, nil -} - -// OpenPty creates a new PTS pair, configures them and returns them. -func OpenPty(uid, gid int64) (*os.File, *os.File, error) { - return OpenPtyInDevpts(-1, uid, gid) -} - -// Extensively commented directly in the code. Please leave the comments! -// Looking at this in a couple of months noone will know why and how this works -// anymore. 
-func ExecReaderToChannel(r io.Reader, bufferSize int, exited <-chan struct{}, fd int) <-chan []byte { - if bufferSize <= (128 * 1024) { - bufferSize = (128 * 1024) - } - - ch := make(chan ([]byte)) - - channelCtx, channelCancel := context.WithCancel(context.Background()) - - // [1]: This function has just one job: Dealing with the case where we - // are running an interactive shell session where we put a process in - // the background that does hold stdin/stdout open, but does not - // generate any output at all. This case cannot be dealt within the - // following function call. Here's why: Assume the above case, now the - // attached child (the shell in this example) exits. This will not - // generate any poll() event: We won't get POLLHUP because the - // background process is holding stdin/stdout open and no one is writing - // to it. So we effectively block on GetPollRevents() in the function - // below. Hence, we use another go routine here who's only job is to - // handle that case: When we detect that the child has exited we check - // whether a POLLIN or POLLHUP event has been generated. If not, we know - // that there's nothing buffered on stdout and exit. - var attachedChildIsDead int32 = 0 - go func() { - <-exited - - atomic.StoreInt32(&attachedChildIsDead, 1) - - defer channelCancel() - - ret, revents, err := GetPollRevents(fd, 0, (unix.POLLIN | unix.POLLPRI | unix.POLLERR | unix.POLLHUP | unix.POLLRDHUP | unix.POLLNVAL)) - if ret < 0 { - logger.Errorf("Failed to poll(POLLIN | POLLPRI | POLLHUP | POLLRDHUP) on file descriptor: %s", err) - // Something went wrong so let's exited otherwise we - // end up in an endless loop. - } else if ret > 0 { - if (revents & unix.POLLERR) > 0 { - logger.Warnf("Detected poll(POLLERR) event") - // Read end has likely been closed so again, - // avoid an endless loop. - } else if (revents & unix.POLLNVAL) > 0 { - logger.Debugf("Detected poll(POLLNVAL) event") - // Well, someone closed the fd haven't they? 
So - // let's go home. - } - } else if ret == 0 { - logger.Debugf("No data in stdout: exiting") - } - }() - - go func() { - readSize := (128 * 1024) - offset := 0 - buf := make([]byte, bufferSize) - avoidAtomicLoad := false - - defer close(ch) - defer channelCancel() - for { - nr := 0 - var err error - - ret, revents, err := GetPollRevents(fd, -1, (unix.POLLIN | unix.POLLPRI | unix.POLLERR | unix.POLLHUP | unix.POLLRDHUP | unix.POLLNVAL)) - if ret < 0 { - // This condition is only reached in cases where we are massively f*cked since we even handle - // EINTR in the underlying C wrapper around poll(). So let's exit here. - logger.Errorf("Failed to poll(POLLIN | POLLPRI | POLLERR | POLLHUP | POLLRDHUP) on file descriptor: %s. Exiting", err) - return - } - - // [2]: If the process exits before all its data has been read by us and no other process holds stdin or - // stdout open, then we will observe a (POLLHUP | POLLRDHUP | POLLIN) event. This means, we need to - // keep on reading from the pty file descriptor until we get a simple POLLHUP back. - both := ((revents & (unix.POLLIN | unix.POLLPRI)) > 0) && ((revents & (unix.POLLHUP | unix.POLLRDHUP)) > 0) - if both { - logger.Debugf("Detected poll(POLLIN | POLLPRI | POLLHUP | POLLRDHUP) event") - read := buf[offset : offset+readSize] - nr, err = r.Read(read) - } - - if (revents & unix.POLLERR) > 0 { - logger.Warnf("Detected poll(POLLERR) event: exiting") - return - } else if (revents & unix.POLLNVAL) > 0 { - logger.Warnf("Detected poll(POLLNVAL) event: exiting") - return - } - - if ((revents & (unix.POLLIN | unix.POLLPRI)) > 0) && !both { - // This might appear unintuitive at first but is actually a nice trick: Assume we are running - // a shell session in a container and put a process in the background that is writing to - // stdout. Now assume the attached process (aka the shell in this example) exits because we - // used Ctrl+D to send EOF or something. 
If no other process would be holding stdout open we - // would expect to observe either a (POLLHUP | POLLRDHUP | POLLIN | POLLPRI) event if there - // is still data buffered from the previous process or a simple (POLLHUP | POLLRDHUP) if - // no data is buffered. The fact that we only observe a (POLLIN | POLLPRI) event means that - // another process is holding stdout open and is writing to it. - // One counter argument that can be leveraged is (brauner looks at tycho :)) - // "Hey, you need to write at least one additional tty buffer to make sure that - // everything that the attached child has written is actually shown." - // The answer to that is: - // "This case can only happen if the process has exited and has left data in stdout which - // would generate a (POLLIN | POLLPRI | POLLHUP | POLLRDHUP) event and this case is already - // handled and triggers another codepath. (See [2].)" - if avoidAtomicLoad || atomic.LoadInt32(&attachedChildIsDead) == 1 { - avoidAtomicLoad = true - // Handle race between atomic.StorInt32() in the go routine - // explained in [1] and atomic.LoadInt32() in the go routine - // here: - // We need to check for (POLLHUP | POLLRDHUP) here again since we might - // still be handling a pure POLLIN event from a write prior to the childs - // exit. But the child might have exited right before and performed - // atomic.StoreInt32() to update attachedChildIsDead before we - // performed our atomic.LoadInt32(). This means we accidentally hit this - // codepath and are misinformed about the available poll() events. So we - // need to perform a non-blocking poll() again to exclude that case: - // - // - If we detect no (POLLHUP | POLLRDHUP) event we know the child - // has already exited but someone else is holding stdin/stdout open and - // writing to it. - // Note that his case should only ever be triggered in situations like - // running a shell and doing stuff like: - // > ./lxc exec xen1 -- bash - // root@xen1:~# yes & - // . - // . - // . 
- // now send Ctrl+D or type "exit". By the time the Ctrl+D/exit event is - // triggered, we will have read all of the childs data it has written to - // stdout and so we can assume that anything that comes now belongs to - // the process that is holding stdin/stdout open. - // - // - If we detect a (POLLHUP | POLLRDHUP) event we know that we've - // hit this codepath on accident caused by the race between - // atomic.StoreInt32() in the go routine explained in [1] and - // atomic.LoadInt32() in this go routine. So the next call to - // GetPollRevents() will either return - // (POLLIN | POLLPRI | POLLERR | POLLHUP | POLLRDHUP) - // or (POLLHUP | POLLRDHUP). Both will trigger another codepath (See [2].) - // that takes care that all data of the child that is buffered in - // stdout is written out. - ret, revents, err := GetPollRevents(fd, 0, (unix.POLLIN | unix.POLLPRI | unix.POLLERR | unix.POLLHUP | unix.POLLRDHUP | unix.POLLNVAL)) - if ret < 0 { - logger.Errorf("Failed to poll(POLLIN | POLLPRI | POLLERR | POLLHUP | POLLRDHUP) on file descriptor: %s. Exiting", err) - return - } else if (revents & (unix.POLLHUP | unix.POLLRDHUP | unix.POLLERR | unix.POLLNVAL)) == 0 { - logger.Debugf("Exiting but background processes are still running") - return - } - } - read := buf[offset : offset+readSize] - nr, err = r.Read(read) - } - - // The attached process has exited and we have read all data that may have - // been buffered. - if ((revents & (unix.POLLHUP | unix.POLLRDHUP)) > 0) && !both { - logger.Debugf("Detected poll(POLLHUP) event: exiting") - return - } - - // Check if channel is closed before potentially writing to it below. - if channelCtx.Err() != nil { - logger.Debug("Detected closed channel: exiting") - return - } - - offset += nr - if offset > 0 && (offset+readSize >= bufferSize || err != nil) { - ch <- buf[0:offset] - offset = 0 - buf = make([]byte, bufferSize) - } - } - }() - - return ch -} - -// GetPollRevents poll for events on provided fd. 
-func GetPollRevents(fd int, timeout int, flags int) (int, int, error) { - pollFd := unix.PollFd{ - Fd: int32(fd), - Events: int16(flags), - Revents: 0, - } - - pollFds := []unix.PollFd{pollFd} - -again: - n, err := unix.Poll(pollFds, timeout) - if err != nil { - if err == unix.EAGAIN || err == unix.EINTR { - goto again - } - - return -1, -1, err - } - - return n, int(pollFds[0].Revents), err -} - -// ExitStatus extracts the exit status from the error returned by exec.Cmd. -// If a nil err is provided then an exit status of 0 is returned along with the nil error. -// If a valid exit status can be extracted from err then it is returned along with a nil error. -// If no valid exit status can be extracted then a -1 exit status is returned along with the err provided. -func ExitStatus(err error) (int, error) { - if err == nil { - return 0, err // No error exit status. - } - - var exitErr *exec.ExitError - - // Detect and extract ExitError to check the embedded exit status. - if errors.As(err, &exitErr) { - // If the process was signaled, extract the signal. - status, isWaitStatus := exitErr.Sys().(unix.WaitStatus) - if isWaitStatus && status.Signaled() { - return 128 + int(status.Signal()), nil // 128 + n == Fatal error signal "n" - } - - // Otherwise capture the exit status from the command. - return exitErr.ExitCode(), nil - } - - return -1, err // Not able to extract an exit status. 
-} diff --git a/vendor/github.com/lxc/lxd/shared/util_unix.go b/vendor/github.com/lxc/lxd/shared/util_unix.go deleted file mode 100644 index 5aad2310..00000000 --- a/vendor/github.com/lxc/lxd/shared/util_unix.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build !windows - -package shared - -import ( - "os" - "syscall" -) - -func GetOwnerMode(fInfo os.FileInfo) (os.FileMode, int, int) { - mode := fInfo.Mode() - uid := int(fInfo.Sys().(*syscall.Stat_t).Uid) - gid := int(fInfo.Sys().(*syscall.Stat_t).Gid) - return mode, uid, gid -} diff --git a/vendor/github.com/lxc/lxd/shared/util_windows.go b/vendor/github.com/lxc/lxd/shared/util_windows.go deleted file mode 100644 index 85dcfa95..00000000 --- a/vendor/github.com/lxc/lxd/shared/util_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build windows - -package shared - -import ( - "os" -) - -func GetOwnerMode(fInfo os.FileInfo) (os.FileMode, int, int) { - return fInfo.Mode(), -1, -1 -} diff --git a/vendor/github.com/lxc/lxd/shared/validate/validate.go b/vendor/github.com/lxc/lxd/shared/validate/validate.go deleted file mode 100644 index e161abb4..00000000 --- a/vendor/github.com/lxc/lxd/shared/validate/validate.go +++ /dev/null @@ -1,872 +0,0 @@ -package validate - -import ( - "bytes" - "crypto/x509" - "encoding/pem" - "fmt" - "net" - "net/url" - "os/exec" - "path/filepath" - "regexp" - "strconv" - "strings" - - "github.com/kballard/go-shellquote" - "github.com/pborman/uuid" - "github.com/robfig/cron/v3" - "gopkg.in/yaml.v2" - - "github.com/lxc/lxd/shared/osarch" - "github.com/lxc/lxd/shared/units" -) - -// stringInSlice checks whether the supplied string is present in the supplied slice. -func stringInSlice(key string, list []string) bool { - for _, entry := range list { - if entry == key { - return true - } - } - return false -} - -// Required returns function that runs one or more validators, all must pass without error. 
-func Required(validators ...func(value string) error) func(value string) error { - return func(value string) error { - for _, validator := range validators { - err := validator(value) - if err != nil { - return err - } - } - - return nil - } -} - -// Optional wraps Required() function to make it return nil if value is empty string. -func Optional(validators ...func(value string) error) func(value string) error { - return func(value string) error { - if value == "" { - return nil - } - - return Required(validators...)(value) - } -} - -// IsInt64 validates whether the string can be converted to an int64. -func IsInt64(value string) error { - _, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("Invalid value for an integer %q", value) - } - - return nil -} - -// IsUint8 validates whether the string can be converted to an uint8. -func IsUint8(value string) error { - _, err := strconv.ParseUint(value, 10, 8) - if err != nil { - return fmt.Errorf("Invalid value for an integer %q. Must be between 0 and 255", value) - } - - return nil -} - -// IsUint32 validates whether the string can be converted to an uint32. -func IsUint32(value string) error { - _, err := strconv.ParseUint(value, 10, 32) - if err != nil { - return fmt.Errorf("Invalid value for uint32 %q: %w", value, err) - } - - return nil -} - -// ParseUint32Range parses a uint32 range in the form "number" or "start-end". -// Returns the start number and the size of the range. 
-func ParseUint32Range(value string) (uint32, uint32, error) { - rangeParts := strings.SplitN(value, "-", 2) - rangeLen := len(rangeParts) - if rangeLen != 1 && rangeLen != 2 { - return 0, 0, fmt.Errorf("Range must contain a single number or start and end numbers") - } - - startNum, err := strconv.ParseUint(rangeParts[0], 10, 32) - if err != nil { - return 0, 0, fmt.Errorf("Invalid number %q", value) - } - - var rangeSize uint32 = 1 - - if rangeLen == 2 { - endNum, err := strconv.ParseUint(rangeParts[1], 10, 32) - if err != nil { - return 0, 0, fmt.Errorf("Invalid end number %q", value) - } - - if startNum >= endNum { - return 0, 0, fmt.Errorf("Start number %d must be lower than end number %d", startNum, endNum) - } - - rangeSize += uint32(endNum) - uint32(startNum) - } - - return uint32(startNum), rangeSize, nil -} - -// IsUint32Range validates whether the string is a uint32 range in the form "number" or "start-end". -func IsUint32Range(value string) error { - _, _, err := ParseUint32Range(value) - return err -} - -// IsInRange checks whether an integer is within a specific range. -func IsInRange(min int64, max int64) func(value string) error { - return func(value string) error { - valueInt, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("Invalid value for an integer %q", value) - } - - if valueInt < min || valueInt > max { - return fmt.Errorf("Value isn't within valid range. Must be between %d and %d", min, max) - } - - return nil - } -} - -// IsPriority validates priority number. -func IsPriority(value string) error { - valueInt, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("Invalid value for an integer %q", value) - } - - if valueInt < 0 || valueInt > 10 { - return fmt.Errorf("Invalid value for a limit %q. Must be between 0 and 10", value) - } - - return nil -} - -// IsBool validates if string can be understood as a bool. 
-func IsBool(value string) error { - if !stringInSlice(strings.ToLower(value), []string{"true", "false", "yes", "no", "1", "0", "on", "off"}) { - return fmt.Errorf("Invalid value for a boolean %q", value) - } - - return nil -} - -// IsOneOf checks whether the string is present in the supplied slice of strings. -func IsOneOf(valid ...string) func(value string) error { - return func(value string) error { - if !stringInSlice(value, valid) { - return fmt.Errorf("Invalid value %q (not one of %s)", value, valid) - } - - return nil - } -} - -// IsAny accepts all strings as valid. -func IsAny(value string) error { - return nil -} - -// IsListOf returns a validator for a comma separated list of values. -func IsListOf(validator func(value string) error) func(value string) error { - return func(value string) error { - for _, v := range strings.Split(value, ",") { - v = strings.TrimSpace(v) - - err := validator(v) - if err != nil { - return fmt.Errorf("Item %q: %w", v, err) - } - } - - return nil - } -} - -// IsNotEmpty requires a non-empty string. -func IsNotEmpty(value string) error { - if value == "" { - return fmt.Errorf("Required value") - } - - return nil -} - -// IsSize checks if string is valid size according to units.ParseByteSizeString. -func IsSize(value string) error { - _, err := units.ParseByteSizeString(value) - if err != nil { - return err - } - - return nil -} - -// IsDeviceID validates string is four lowercase hex characters suitable as Vendor or Device ID. -func IsDeviceID(value string) error { - regexHexLc, err := regexp.Compile("^[0-9a-f]+$") - if err != nil { - return err - } - - if len(value) != 4 || !regexHexLc.MatchString(value) { - return fmt.Errorf("Invalid value, must be four lower case hex characters") - } - - return nil -} - -// IsInterfaceName validates a real network interface name. -func IsInterfaceName(value string) error { - // Validate the length. 
- if len(value) < 2 { - return fmt.Errorf("Network interface is too short (minimum 2 characters)") - } - - if len(value) > 15 { - return fmt.Errorf("Network interface is too long (maximum 15 characters)") - } - - // Validate the character set. - match, _ := regexp.MatchString("^[-_a-zA-Z0-9.]+$", value) - if !match { - return fmt.Errorf("Network interface contains invalid characters") - } - - return nil -} - -// IsNetworkMAC validates an Ethernet MAC address. e.g. "00:00:5e:00:53:01". -func IsNetworkMAC(value string) error { - _, err := net.ParseMAC(value) - - // Check is valid Ethernet MAC length and delimiter. - if err != nil || len(value) != 17 || strings.ContainsAny(value, "-.") { - return fmt.Errorf("Invalid MAC address, must be 6 bytes of hex separated by colons") - } - - return nil -} - -// IsNetworkAddress validates an IP (v4 or v6) address string. -func IsNetworkAddress(value string) error { - ip := net.ParseIP(value) - if ip == nil { - return fmt.Errorf("Not an IP address %q", value) - } - - return nil -} - -// IsNetwork validates an IP network CIDR string. -func IsNetwork(value string) error { - ip, subnet, err := net.ParseCIDR(value) - if err != nil { - return err - } - - if ip.String() != subnet.IP.String() { - return fmt.Errorf("Not an IP network address %q", value) - } - - return nil -} - -// IsNetworkAddressCIDR validates an IP addresss string in CIDR format. -func IsNetworkAddressCIDR(value string) error { - _, _, err := net.ParseCIDR(value) - if err != nil { - return err - } - - return nil -} - -// IsNetworkRange validates an IP range in the format "start-end". 
-func IsNetworkRange(value string) error { - ips := strings.SplitN(value, "-", 2) - if len(ips) != 2 { - return fmt.Errorf("IP range must contain start and end IP addresses") - } - - startIP := net.ParseIP(ips[0]) - if startIP == nil { - return fmt.Errorf("Start not an IP address %q", ips[0]) - } - - endIP := net.ParseIP(ips[1]) - if endIP == nil { - return fmt.Errorf("End not an IP address %q", ips[1]) - } - - if (startIP.To4() != nil) != (endIP.To4() != nil) { - return fmt.Errorf("Start and end IP addresses are not in same family") - } - - if bytes.Compare(startIP, endIP) > 0 { - return fmt.Errorf("Start IP address must be before or equal to end IP address") - } - - return nil -} - -// IsNetworkV4 validates an IPv4 CIDR string. -func IsNetworkV4(value string) error { - ip, subnet, err := net.ParseCIDR(value) - if err != nil { - return err - } - - if ip.To4() == nil { - return fmt.Errorf("Not an IPv4 network %q", value) - } - - if ip.String() != subnet.IP.String() { - return fmt.Errorf("Not an IPv4 network address %q", value) - } - - return nil -} - -// IsNetworkAddressV4 validates an IPv4 addresss string. -func IsNetworkAddressV4(value string) error { - ip := net.ParseIP(value) - if ip == nil || ip.To4() == nil { - return fmt.Errorf("Not an IPv4 address %q", value) - } - - return nil -} - -// IsNetworkAddressCIDRV4 validates an IPv4 addresss string in CIDR format. -func IsNetworkAddressCIDRV4(value string) error { - ip, subnet, err := net.ParseCIDR(value) - if err != nil { - return err - } - - if ip.To4() == nil { - return fmt.Errorf("Not an IPv4 address %q", value) - } - - if ip.String() == subnet.IP.String() { - return fmt.Errorf("Not a usable IPv4 address %q", value) - } - - return nil -} - -// IsNetworkRangeV4 validates an IPv4 range in the format "start-end". 
-func IsNetworkRangeV4(value string) error { - ips := strings.SplitN(value, "-", 2) - if len(ips) != 2 { - return fmt.Errorf("IP range must contain start and end IP addresses") - } - - for _, ip := range ips { - err := IsNetworkAddressV4(ip) - if err != nil { - return err - } - } - - return nil -} - -// IsNetworkV6 validates an IPv6 CIDR string. -func IsNetworkV6(value string) error { - ip, subnet, err := net.ParseCIDR(value) - if err != nil { - return err - } - - if ip == nil || ip.To4() != nil { - return fmt.Errorf("Not an IPv6 network %q", value) - } - - if ip.String() != subnet.IP.String() { - return fmt.Errorf("Not an IPv6 network address %q", value) - } - - return nil -} - -// IsNetworkAddressV6 validates an IPv6 addresss string. -func IsNetworkAddressV6(value string) error { - ip := net.ParseIP(value) - if ip == nil || ip.To4() != nil { - return fmt.Errorf("Not an IPv6 address %q", value) - } - - return nil -} - -// IsNetworkAddressCIDRV6 validates an IPv6 addresss string in CIDR format. -func IsNetworkAddressCIDRV6(value string) error { - ip, subnet, err := net.ParseCIDR(value) - if err != nil { - return err - } - - if ip.To4() != nil { - return fmt.Errorf("Not an IPv6 address %q", value) - } - - if ip.String() == subnet.IP.String() { - return fmt.Errorf("Not a usable IPv6 address %q", value) - } - - return nil -} - -// IsNetworkRangeV6 validates an IPv6 range in the format "start-end". -func IsNetworkRangeV6(value string) error { - ips := strings.SplitN(value, "-", 2) - if len(ips) != 2 { - return fmt.Errorf("IP range must contain start and end IP addresses") - } - - for _, ip := range ips { - err := IsNetworkAddressV6(ip) - if err != nil { - return err - } - } - - return nil -} - -// IsNetworkVLAN validates a VLAN ID. 
-func IsNetworkVLAN(value string) error { - vlanID, err := strconv.Atoi(value) - if err != nil { - return fmt.Errorf("Invalid VLAN ID %q", value) - } - - if vlanID < 0 || vlanID > 4094 { - return fmt.Errorf("Out of VLAN ID range (0-4094) %q", value) - } - - return nil -} - -// IsNetworkMTU validates MTU number >= 1280 and <= 16384. -// Anything below 68 and the kernel doesn't allow IPv4, anything below 1280 and the kernel doesn't allow IPv6. -// So require an IPv6-compatible MTU as the low value and cap at the max ethernet jumbo frame size. -func IsNetworkMTU(value string) error { - mtu, err := strconv.ParseUint(value, 10, 32) - if err != nil { - return fmt.Errorf("Invalid MTU %q", value) - } - - if mtu < 1280 || mtu > 16384 { - return fmt.Errorf("Out of MTU range (1280-16384) %q", value) - } - - return nil -} - -// IsNetworkPort validates an IP port number >= 0 and <= 65535. -func IsNetworkPort(value string) error { - port, err := strconv.ParseUint(value, 10, 32) - if err != nil { - return fmt.Errorf("Invalid port number %q", value) - } - - if port > 65535 { - return fmt.Errorf("Out of port number range (0-65535) %q", value) - } - - return nil -} - -// IsNetworkPortRange validates an IP port range in the format "port" or "start-end". 
-func IsNetworkPortRange(value string) error { - ports := strings.SplitN(value, "-", 2) - portsLen := len(ports) - if portsLen != 1 && portsLen != 2 { - return fmt.Errorf("Port range must contain either a single port or start and end port numbers") - } - - startPort, err := strconv.ParseUint(ports[0], 10, 32) - if err != nil { - return fmt.Errorf("Invalid port number %q", value) - } - - if portsLen == 2 { - endPort, err := strconv.ParseUint(ports[1], 10, 32) - if err != nil { - return fmt.Errorf("Invalid end port number %q", value) - } - - if startPort >= endPort { - return fmt.Errorf("Start port %d must be lower than end port %d", startPort, endPort) - } - } - - return nil -} - -// IsURLSegmentSafe validates whether value can be used in a URL segment. -func IsURLSegmentSafe(value string) error { - for _, char := range []string{"/", "?", "&", "+"} { - if strings.Contains(value, char) { - return fmt.Errorf("Cannot contain %q", char) - } - } - - return nil -} - -// IsUUID validates whether a value is a UUID. -func IsUUID(value string) error { - if uuid.Parse(value) == nil { - return fmt.Errorf("Invalid UUID") - } - - return nil -} - -// IsPCIAddress validates whether a value is a PCI address. -func IsPCIAddress(value string) error { - regexHex, err := regexp.Compile(`^([0-9a-fA-F]{4}?:)?[0-9a-fA-F]{2}:[0-9a-fA-F]{2}\.[0-9a-fA-F]$`) - if err != nil { - return err - } - - if !regexHex.MatchString(value) { - return fmt.Errorf("Invalid PCI address") - } - - return nil -} - -// IsCompressionAlgorithm validates whether a value is a valid compression algorithm and is available on the system. -func IsCompressionAlgorithm(value string) error { - if value == "none" { - return nil - } - - // Going to look up tar2sqfs executable binary - if value == "squashfs" { - value = "tar2sqfs" - } - - // Parse the command. 
- fields, err := shellquote.Split(value) - if err != nil { - return err - } - - _, err = exec.LookPath(fields[0]) - return err -} - -// IsArchitecture validates whether the value is a valid LXD architecture name. -func IsArchitecture(value string) error { - return IsOneOf(osarch.SupportedArchitectures()...)(value) -} - -// IsCron checks that it's a valid cron pattern or alias. -func IsCron(aliases []string) func(value string) error { - return func(value string) error { - isValid := func(value string) error { - // Accept valid aliases. - for _, alias := range aliases { - if alias == value { - return nil - } - } - - if len(strings.Split(value, " ")) != 5 { - return fmt.Errorf("Schedule must be of the form: ") - } - - _, err := cron.ParseStandard(value) - if err != nil { - return fmt.Errorf("Error parsing schedule: %w", err) - } - - return nil - } - - // Can be comma+space separated (just commas are valid cron pattern). - value = strings.ToLower(value) - triggers := strings.Split(value, ", ") - for _, trigger := range triggers { - err := isValid(trigger) - if err != nil { - return err - } - } - - return nil - } -} - -// IsListenAddress returns a validator for a listen address. -func IsListenAddress(allowDNS bool, allowWildcard bool, requirePort bool) func(value string) error { - return func(value string) error { - // Validate address format and port. - host, _, err := net.SplitHostPort(value) - if err != nil { - if requirePort { - return fmt.Errorf("A port is required as part of the address") - } - - host = value - } - - // Validate wildcard. - if stringInSlice(host, []string{"", "::", "[::]", "0.0.0.0"}) { - if !allowWildcard { - return fmt.Errorf("Wildcard addresses aren't allowed") - } - - return nil - } - - // Validate DNS. 
- ip := net.ParseIP(strings.Trim(host, "[]")) - if ip != nil { - return nil - } - - if !allowDNS { - return fmt.Errorf("DNS names not allowed in address") - } - - _, err = net.LookupHost(host) - if err != nil { - return fmt.Errorf("Couldn't resolve %q", host) - } - - return nil - } -} - -// IsX509Certificate checks if the value is a valid x509 PEM Certificate. -func IsX509Certificate(value string) error { - certBlock, _ := pem.Decode([]byte(value)) - if certBlock == nil { - return fmt.Errorf("Invalid certificate") - } - - _, err := x509.ParseCertificate(certBlock.Bytes) - - return err -} - -// IsAbsFilePath checks if value is an absolute file path. -func IsAbsFilePath(value string) error { - if !filepath.IsAbs(value) { - return fmt.Errorf("Must be absolute file path") - } - - return nil -} - -// ParseNetworkVLANRange parses a VLAN range in the form "number" or "start-end". -// Returns the start number and the number of items in the range. -func ParseNetworkVLANRange(vlan string) (int, int, error) { - err := IsNetworkVLAN(vlan) - if err == nil { - vlanRangeStart, err := strconv.Atoi(vlan) - if err != nil { - return -1, -1, err - } - - return vlanRangeStart, 1, nil - } - - vlanRange := strings.Split(vlan, "-") - if len(vlanRange) != 2 { - return -1, -1, fmt.Errorf("Invalid VLAN range input: %s", vlan) - } - - if IsNetworkVLAN(vlanRange[0]) != nil || IsNetworkVLAN(vlanRange[1]) != nil { - return -1, -1, fmt.Errorf("Invalid VLAN range boundary. start:%s, end:%s", vlanRange[0], vlanRange[1]) - } - - vlanRangeStart, err := strconv.Atoi(vlanRange[0]) - if err != nil { - return -1, -1, err - } - - vlanRangeEnd, err := strconv.Atoi(vlanRange[1]) - if err != nil { - return -1, -1, err - } - - if vlanRangeStart > vlanRangeEnd { - return -1, -1, fmt.Errorf("Invalid VLAN range boundary. start:%d is higher than end:%d", vlanRangeStart, vlanRangeEnd) - } - - return vlanRangeStart, vlanRangeEnd - vlanRangeStart + 1, nil -} - -// IsHostname checks the string is valid DNS hostname. 
-func IsHostname(name string) error { - // Validate length - if len(name) < 1 || len(name) > 63 { - return fmt.Errorf("Name must be 1-63 characters long") - } - - // Validate first character - if strings.HasPrefix(name, "-") { - return fmt.Errorf(`Name must not start with "-" character`) - } - - // Validate last character - if strings.HasSuffix(name, "-") { - return fmt.Errorf(`Name must not end with "-" character`) - } - - _, err := strconv.Atoi(string(name[0])) - if err == nil { - return fmt.Errorf("Name must not start with a number") - } - - match, err := regexp.MatchString(`^[\-a-zA-Z0-9]+$`, name) - if err != nil { - return err - } - - if !match { - return fmt.Errorf("Name can only contain alphanumeric and hyphen characters") - } - - return nil -} - -// IsDeviceName checks name is 1-63 characters long, doesn't start with a full stop and contains only alphanumeric, -// forward slash, hyphen, colon, underscore and full stop characters. -func IsDeviceName(name string) error { - if len(name) < 1 || len(name) > 63 { - return fmt.Errorf("Name must be 1-63 characters long") - } - - if string(name[0]) == "." { - return fmt.Errorf(`Name must not start with "." character`) - } - - match, err := regexp.MatchString(`^[\/\.\-:_a-zA-Z0-9]+$`, name) - if err != nil { - return err - } - - if !match { - return fmt.Errorf("Name can only contain alphanumeric, forward slash, hyphen, colon, underscore and full stop characters") - } - - return nil -} - -// IsRequestURL checks value is a valid HTTP/HTTPS request URL. -func IsRequestURL(value string) error { - if value == "" { - return fmt.Errorf("Empty URL") - } - - _, err := url.ParseRequestURI(value) - if err != nil { - return fmt.Errorf("Invalid URL: %w", err) - } - - return nil -} - -// IsCloudInitUserData checks value is valid cloud-init user data. 
-func IsCloudInitUserData(value string) error { - if value == "#cloud-config" || strings.HasPrefix(value, "#cloud-config\n") { - lines := strings.SplitN(value, "\n", 2) - - // If value only contains the cloud-config header, it is valid. - if len(lines) == 1 { - return nil - } - - return IsYAML(lines[1]) - } - - // Since there are various other user-data formats besides cloud-config, consider those valid. - return nil -} - -// IsYAML checks value is valid YAML. -func IsYAML(value string) error { - out := struct{}{} - - err := yaml.Unmarshal([]byte(value), &out) - if err != nil { - return err - } - - return nil -} - -// IsValidCPUSet checks value is a valid CPU set. -func IsValidCPUSet(value string) error { - // Validate the CPU set syntax. - match, _ := regexp.MatchString("^([0-9]+([,-][0-9]+)?)(,[0-9]+([,-][0-9]+)*)?$", value) - if !match { - return fmt.Errorf("Invalid CPU limit syntax") - } - - cpus := make(map[int64]int) - chunks := strings.Split(value, ",") - - for _, chunk := range chunks { - if strings.Contains(chunk, "-") { - // Range - fields := strings.SplitN(chunk, "-", 2) - if len(fields) != 2 { - return fmt.Errorf("Invalid cpuset value: %s", value) - } - - low, err := strconv.ParseInt(fields[0], 10, 64) - if err != nil { - return fmt.Errorf("Invalid cpuset value: %s", value) - } - - high, err := strconv.ParseInt(fields[1], 10, 64) - if err != nil { - return fmt.Errorf("Invalid cpuset value: %s", value) - } - - for i := low; i <= high; i++ { - cpus[i]++ - } - } else { - // Simple entry - nr, err := strconv.ParseInt(chunk, 10, 64) - if err != nil { - return fmt.Errorf("Invalid cpuset value: %s", value) - } - - cpus[nr]++ - } - } - - for i := range cpus { - // The CPU was specified more than once, e.g. 1-3,3. 
- if cpus[i] > 1 { - return fmt.Errorf("Cannot define CPU multiple times") - } - } - - return nil -} diff --git a/vendor/github.com/pborman/uuid/.travis.yml b/vendor/github.com/pborman/uuid/.travis.yml deleted file mode 100644 index 3deb4a12..00000000 --- a/vendor/github.com/pborman/uuid/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go - -go: - - "1.9" - - "1.10" - - "1.11" - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTING.md b/vendor/github.com/pborman/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09f..00000000 --- a/vendor/github.com/pborman/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTORS b/vendor/github.com/pborman/uuid/CONTRIBUTORS deleted file mode 100644 index b382a04e..00000000 --- a/vendor/github.com/pborman/uuid/CONTRIBUTORS +++ /dev/null @@ -1 +0,0 @@ -Paul Borman diff --git a/vendor/github.com/pborman/uuid/LICENSE b/vendor/github.com/pborman/uuid/LICENSE deleted file mode 100644 index 5dc68268..00000000 --- a/vendor/github.com/pborman/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md deleted file mode 100644 index 810ad40d..00000000 --- a/vendor/github.com/pborman/uuid/README.md +++ /dev/null @@ -1,15 +0,0 @@ -This project was automatically exported from code.google.com/p/go-uuid - -# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services. - -This package now leverages the github.com/google/uuid package (which is based off an earlier version of this package). 
- -###### Install -`go get github.com/pborman/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid) - -Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: -http://godoc.org/github.com/pborman/uuid diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go deleted file mode 100644 index 50a0f2d0..00000000 --- a/vendor/github.com/pborman/uuid/dce.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) UUID { - uuid := NewUUID() - if uuid != nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. 
-// -// NewDCEPerson(Person, uint32(os.Getuid())) -func NewDCEPerson() UUID { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCEGroup(Group, uint32(os.Getgid())) -func NewDCEGroup() UUID { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID or false. -func (uuid UUID) Domain() (Domain, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return Domain(uuid[9]), true -} - -// Id returns the id for a Version 2 UUID or false. -func (uuid UUID) Id() (uint32, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return binary.BigEndian.Uint32(uuid[0:4]), true -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go deleted file mode 100644 index 727d7616..00000000 --- a/vendor/github.com/pborman/uuid/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The uuid package generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// This package is a partial wrapper around the github.com/google/uuid package. -// This package represents a UUID as []byte while github.com/google/uuid -// represents a UUID as [16]byte. -package uuid diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go deleted file mode 100644 index a0420c1e..00000000 --- a/vendor/github.com/pborman/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known Name Space IDs and UUIDs -var ( - NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") - NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") - NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") - NIL = Parse("00000000-0000-0000-0000-000000000000") -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space) - h.Write([]byte(data)) - s := h.Sum(nil) - uuid := make([]byte, 16) - copy(uuid, s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/pborman/uuid/marshal.go b/vendor/github.com/pborman/uuid/marshal.go deleted file mode 100644 index 35b89352..00000000 --- a/vendor/github.com/pborman/uuid/marshal.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "errors" - "fmt" - - guuid "github.com/google/uuid" -) - -// MarshalText implements encoding.TextMarshaler. -func (u UUID) MarshalText() ([]byte, error) { - if len(u) != 16 { - return nil, nil - } - var js [36]byte - encodeHex(js[:], u) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (u *UUID) UnmarshalText(data []byte) error { - if len(data) == 0 { - return nil - } - id := Parse(string(data)) - if id == nil { - return errors.New("invalid UUID") - } - *u = id - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (u UUID) MarshalBinary() ([]byte, error) { - return u[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (u *UUID) UnmarshalBinary(data []byte) error { - if len(data) == 0 { - return nil - } - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - var id [16]byte - copy(id[:], data) - *u = id[:] - return nil -} - -// MarshalText implements encoding.TextMarshaler. -func (u Array) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], u[:]) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (u *Array) UnmarshalText(data []byte) error { - id, err := guuid.ParseBytes(data) - if err != nil { - return err - } - *u = Array(id) - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (u Array) MarshalBinary() ([]byte, error) { - return u[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
-func (u *Array) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(u[:], data) - return nil -} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go deleted file mode 100644 index e524e010..00000000 --- a/vendor/github.com/pborman/uuid/node.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - guuid "github.com/google/uuid" -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - return guuid.NodeInterface() -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - return guuid.SetNodeInterface(name) -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - return guuid.NodeID() -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - return guuid.SetNodeID(id) -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. 
-func (uuid UUID) NodeID() []byte { - if len(uuid) != 16 { - return nil - } - node := make([]byte, 6) - copy(node, uuid[10:]) - return node -} diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go deleted file mode 100644 index 929c3847..00000000 --- a/vendor/github.com/pborman/uuid/sql.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "errors" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src.(type) { - case string: - // if an empty UUID comes from a table, we return a null UUID - if src.(string) == "" { - return nil - } - - // see uuid.Parse for required string format - parsed := Parse(src.(string)) - - if parsed == nil { - return errors.New("Scan: invalid UUID format") - } - - *uuid = parsed - case []byte: - b := src.([]byte) - - // if an empty UUID comes from a table, we return a null UUID - if len(b) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(b) == 16 { - parsed := make([]byte, 16) - copy(parsed, b) - *uuid = UUID(parsed) - } else { - u := Parse(string(b)) - - if u == nil { - return errors.New("Scan: invalid UUID format") - } - - *uuid = u - } - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. 
-func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go deleted file mode 100644 index 7286824d..00000000 --- a/vendor/github.com/pborman/uuid/time.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - - guuid "github.com/google/uuid" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time = guuid.Time - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { return guuid.GetTime() } - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence a new random -// clock sequence is generated the first time a clock sequence is requested by -// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated -// for -func ClockSequence() int { return guuid.ClockSequence() } - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { guuid.SetClockSequence(seq) } - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. It returns false if uuid is not valid. The time is only well defined -// for version 1 and 2 UUIDs. 
-func (uuid UUID) Time() (Time, bool) { - if len(uuid) != 16 { - return 0, false - } - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time), true -} - -// ClockSequence returns the clock sequence encoded in uuid. It returns false -// if uuid is not valid. The clock sequence is only well defined for version 1 -// and 2 UUIDs. -func (uuid UUID) ClockSequence() (int, bool) { - if len(uuid) != 16 { - return 0, false - } - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true -} diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go deleted file mode 100644 index 255b5e24..00000000 --- a/vendor/github.com/pborman/uuid/util.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -// xvalues returns the value of a byte as a hexadecimal digit or 255. 
-var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts the the first two hex bytes of x into a byte. -func xtob(x string) (byte, bool) { - b1 := xvalues[x[0]] - b2 := xvalues[x[1]] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go deleted file mode 100644 index 33700042..00000000 --- a/vendor/github.com/pborman/uuid/uuid.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "io" - - guuid "github.com/google/uuid" -) - -// Array is a pass-by-value UUID that can be used as an effecient key in a map. -type Array [16]byte - -// UUID converts uuid into a slice. -func (uuid Array) UUID() UUID { - return uuid[:] -} - -// String returns the string representation of uuid, -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (uuid Array) String() string { - return guuid.UUID(uuid).String() -} - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID []byte - -// A Version represents a UUIDs version. -type Version = guuid.Version - -// A Variant represents a UUIDs variant. -type Variant = guuid.Variant - -// Constants returned by Variant. -const ( - Invalid = guuid.Invalid // Invalid UUID - RFC4122 = guuid.RFC4122 // The variant specified in RFC4122 - Reserved = guuid.Reserved // Reserved, NCS backward compatibility. - Microsoft = guuid.Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future = guuid.Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// New returns a new random (version 4) UUID as a string. It is a convenience -// function for NewRandom().String(). -func New() string { - return NewRandom().String() -} - -// Parse decodes s into a UUID or returns nil. See github.com/google/uuid for -// the formats parsed. -func Parse(s string) UUID { - gu, err := guuid.Parse(s) - if err == nil { - return gu[:] - } - return nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - gu, err := guuid.ParseBytes(b) - if err == nil { - return gu[:], nil - } - return nil, err -} - -// Equal returns true if uuid1 and uuid2 are equal. -func Equal(uuid1, uuid2 UUID) bool { - return bytes.Equal(uuid1, uuid2) -} - -// Array returns an array representation of uuid that can be used as a map key. 
-// Array panics if uuid is not valid. -func (uuid UUID) Array() Array { - if len(uuid) != 16 { - panic("invalid uuid") - } - var a Array - copy(a[:], uuid) - return a -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - if len(uuid) != 16 { - return "" - } - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - if len(uuid) != 16 { - return "" - } - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst[:], uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. It returns Invalid if -// uuid is invalid. -func (uuid UUID) Variant() Variant { - if len(uuid) != 16 { - return Invalid - } - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. It returns false if uuid is not -// valid. -func (uuid UUID) Version() (Version, bool) { - if len(uuid) != 16 { - return 0, false - } - return Version(uuid[6] >> 4), true -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. 
-func SetRand(r io.Reader) { - guuid.SetRand(r) -} diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go deleted file mode 100644 index 7af948da..00000000 --- a/vendor/github.com/pborman/uuid/version1.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - guuid "github.com/google/uuid" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil. -func NewUUID() UUID { - gu, err := guuid.NewUUID() - if err == nil { - return UUID(gu[:]) - } - return nil -} diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go deleted file mode 100644 index 767dd0c3..00000000 --- a/vendor/github.com/pborman/uuid/version4.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import guuid "github.com/google/uuid" - -// NewRandom returns a Random (Version 4) UUID or panics. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. 
One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() UUID { - if gu, err := guuid.NewRandom(); err == nil { - return UUID(gu[:]) - } - return nil -} diff --git a/vendor/github.com/pkg/sftp/.gitignore b/vendor/github.com/pkg/sftp/.gitignore deleted file mode 100644 index caf2dca2..00000000 --- a/vendor/github.com/pkg/sftp/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -.*.swo -.*.swp - -server_standalone/server_standalone - -examples/*/id_rsa -examples/*/id_rsa.pub - -memprofile.out -memprofile.svg diff --git a/vendor/github.com/pkg/sftp/CONTRIBUTORS b/vendor/github.com/pkg/sftp/CONTRIBUTORS deleted file mode 100644 index 5c7196ae..00000000 --- a/vendor/github.com/pkg/sftp/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -Dave Cheney -Saulius Gurklys -John Eikenberry diff --git a/vendor/github.com/pkg/sftp/LICENSE b/vendor/github.com/pkg/sftp/LICENSE deleted file mode 100644 index b7b53921..00000000 --- a/vendor/github.com/pkg/sftp/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) 2013, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/sftp/Makefile b/vendor/github.com/pkg/sftp/Makefile deleted file mode 100644 index 4d3a0079..00000000 --- a/vendor/github.com/pkg/sftp/Makefile +++ /dev/null @@ -1,27 +0,0 @@ -.PHONY: integration integration_w_race benchmark - -integration: - go test -integration -v ./... - go test -testserver -v ./... - go test -integration -testserver -v ./... - go test -integration -allocator -v ./... - go test -testserver -allocator -v ./... - go test -integration -testserver -allocator -v ./... - -integration_w_race: - go test -race -integration -v ./... - go test -race -testserver -v ./... - go test -race -integration -testserver -v ./... - go test -race -integration -allocator -v ./... - go test -race -testserver -allocator -v ./... - go test -race -integration -allocator -testserver -v ./... - -COUNT ?= 1 -BENCHMARK_PATTERN ?= "." 
- -benchmark: - go test -integration -run=NONE -bench=$(BENCHMARK_PATTERN) -benchmem -count=$(COUNT) - -benchmark_w_memprofile: - go test -integration -run=NONE -bench=$(BENCHMARK_PATTERN) -benchmem -count=$(COUNT) -memprofile memprofile.out - go tool pprof -svg -output=memprofile.svg memprofile.out diff --git a/vendor/github.com/pkg/sftp/README.md b/vendor/github.com/pkg/sftp/README.md deleted file mode 100644 index 5e78cd39..00000000 --- a/vendor/github.com/pkg/sftp/README.md +++ /dev/null @@ -1,44 +0,0 @@ -sftp ----- - -The `sftp` package provides support for file system operations on remote ssh -servers using the SFTP subsystem. It also implements an SFTP server for serving -files from the filesystem. - -![CI Status](https://github.com/pkg/sftp/workflows/CI/badge.svg?branch=master&event=push) [![Go Reference](https://pkg.go.dev/badge/github.com/pkg/sftp.svg)](https://pkg.go.dev/github.com/pkg/sftp) - -usage and examples ------------------- - -See [https://pkg.go.dev/github.com/pkg/sftp](https://pkg.go.dev/github.com/pkg/sftp) for -examples and usage. - -The basic operation of the package mirrors the facilities of the -[os](http://golang.org/pkg/os) package. - -The Walker interface for directory traversal is heavily inspired by Keith -Rarick's [fs](https://pkg.go.dev/github.com/kr/fs) package. - -roadmap -------- - -* There is way too much duplication in the Client methods. If there was an - unmarshal(interface{}) method this would reduce a heap of the duplication. - -contributing ------------- - -We welcome pull requests, bug fixes and issue reports. - -Before proposing a large change, first please discuss your change by raising an -issue. - -For API/code bugs, please include a small, self contained code example to -reproduce the issue. For pull requests, remember test coverage. - -We try to handle issues and pull requests with a 0 open philosophy. That means -we will try to address the submission as soon as possible and will work toward -a resolution. 
If progress can no longer be made (eg. unreproducible bug) or -stops (eg. unresponsive submitter), we will close the bug. - -Thanks. diff --git a/vendor/github.com/pkg/sftp/allocator.go b/vendor/github.com/pkg/sftp/allocator.go deleted file mode 100644 index 3e67e543..00000000 --- a/vendor/github.com/pkg/sftp/allocator.go +++ /dev/null @@ -1,96 +0,0 @@ -package sftp - -import ( - "sync" -) - -type allocator struct { - sync.Mutex - available [][]byte - // map key is the request order - used map[uint32][][]byte -} - -func newAllocator() *allocator { - return &allocator{ - // micro optimization: initialize available pages with an initial capacity - available: make([][]byte, 0, SftpServerWorkerCount*2), - used: make(map[uint32][][]byte), - } -} - -// GetPage returns a previously allocated and unused []byte or create a new one. -// The slice have a fixed size = maxMsgLength, this value is suitable for both -// receiving new packets and reading the files to serve -func (a *allocator) GetPage(requestOrderID uint32) []byte { - a.Lock() - defer a.Unlock() - - var result []byte - - // get an available page and remove it from the available ones. - if len(a.available) > 0 { - truncLength := len(a.available) - 1 - result = a.available[truncLength] - - a.available[truncLength] = nil // clear out the internal pointer - a.available = a.available[:truncLength] // truncate the slice - } - - // no preallocated slice found, just allocate a new one - if result == nil { - result = make([]byte, maxMsgLength) - } - - // put result in used pages - a.used[requestOrderID] = append(a.used[requestOrderID], result) - - return result -} - -// ReleasePages marks unused all pages in use for the given requestID -func (a *allocator) ReleasePages(requestOrderID uint32) { - a.Lock() - defer a.Unlock() - - if used := a.used[requestOrderID]; len(used) > 0 { - a.available = append(a.available, used...) - } - delete(a.used, requestOrderID) -} - -// Free removes all the used and available pages. 
-// Call this method when the allocator is not needed anymore -func (a *allocator) Free() { - a.Lock() - defer a.Unlock() - - a.available = nil - a.used = make(map[uint32][][]byte) -} - -func (a *allocator) countUsedPages() int { - a.Lock() - defer a.Unlock() - - num := 0 - for _, p := range a.used { - num += len(p) - } - return num -} - -func (a *allocator) countAvailablePages() int { - a.Lock() - defer a.Unlock() - - return len(a.available) -} - -func (a *allocator) isRequestOrderIDUsed(requestOrderID uint32) bool { - a.Lock() - defer a.Unlock() - - _, ok := a.used[requestOrderID] - return ok -} diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go deleted file mode 100644 index 2bb2d576..00000000 --- a/vendor/github.com/pkg/sftp/attrs.go +++ /dev/null @@ -1,90 +0,0 @@ -package sftp - -// ssh_FXP_ATTRS support -// see http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 - -import ( - "os" - "time" -) - -const ( - sshFileXferAttrSize = 0x00000001 - sshFileXferAttrUIDGID = 0x00000002 - sshFileXferAttrPermissions = 0x00000004 - sshFileXferAttrACmodTime = 0x00000008 - sshFileXferAttrExtended = 0x80000000 - - sshFileXferAttrAll = sshFileXferAttrSize | sshFileXferAttrUIDGID | sshFileXferAttrPermissions | - sshFileXferAttrACmodTime | sshFileXferAttrExtended -) - -// fileInfo is an artificial type designed to satisfy os.FileInfo. -type fileInfo struct { - name string - stat *FileStat -} - -// Name returns the base name of the file. -func (fi *fileInfo) Name() string { return fi.name } - -// Size returns the length in bytes for regular files; system-dependent for others. -func (fi *fileInfo) Size() int64 { return int64(fi.stat.Size) } - -// Mode returns file mode bits. -func (fi *fileInfo) Mode() os.FileMode { return toFileMode(fi.stat.Mode) } - -// ModTime returns the last modification time of the file. 
-func (fi *fileInfo) ModTime() time.Time { return time.Unix(int64(fi.stat.Mtime), 0) } - -// IsDir returns true if the file is a directory. -func (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() } - -func (fi *fileInfo) Sys() interface{} { return fi.stat } - -// FileStat holds the original unmarshalled values from a call to READDIR or -// *STAT. It is exported for the purposes of accessing the raw values via -// os.FileInfo.Sys(). It is also used server side to store the unmarshalled -// values for SetStat. -type FileStat struct { - Size uint64 - Mode uint32 - Mtime uint32 - Atime uint32 - UID uint32 - GID uint32 - Extended []StatExtended -} - -// StatExtended contains additional, extended information for a FileStat. -type StatExtended struct { - ExtType string - ExtData string -} - -func fileInfoFromStat(stat *FileStat, name string) os.FileInfo { - return &fileInfo{ - name: name, - stat: stat, - } -} - -func fileStatFromInfo(fi os.FileInfo) (uint32, *FileStat) { - mtime := fi.ModTime().Unix() - atime := mtime - var flags uint32 = sshFileXferAttrSize | - sshFileXferAttrPermissions | - sshFileXferAttrACmodTime - - fileStat := &FileStat{ - Size: uint64(fi.Size()), - Mode: fromFileMode(fi.Mode()), - Mtime: uint32(mtime), - Atime: uint32(atime), - } - - // os specific file stat decoding - fileStatFromInfoOs(fi, &flags, fileStat) - - return flags, fileStat -} diff --git a/vendor/github.com/pkg/sftp/attrs_stubs.go b/vendor/github.com/pkg/sftp/attrs_stubs.go deleted file mode 100644 index c01f3367..00000000 --- a/vendor/github.com/pkg/sftp/attrs_stubs.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build plan9 windows android - -package sftp - -import ( - "os" -) - -func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { - // todo -} diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go deleted file mode 100644 index d1f44524..00000000 --- a/vendor/github.com/pkg/sftp/attrs_unix.go +++ /dev/null @@ -1,16 +0,0 @@ 
-// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix js - -package sftp - -import ( - "os" - "syscall" -) - -func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { - if statt, ok := fi.Sys().(*syscall.Stat_t); ok { - *flags |= sshFileXferAttrUIDGID - fileStat.UID = statt.Uid - fileStat.GID = statt.Gid - } -} diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go deleted file mode 100644 index 9e0b6164..00000000 --- a/vendor/github.com/pkg/sftp/client.go +++ /dev/null @@ -1,1977 +0,0 @@ -package sftp - -import ( - "bytes" - "encoding/binary" - "errors" - "io" - "math" - "os" - "path" - "sync" - "sync/atomic" - "syscall" - "time" - - "github.com/kr/fs" - "golang.org/x/crypto/ssh" -) - -var ( - // ErrInternalInconsistency indicates the packets sent and the data queued to be - // written to the file don't match up. It is an unusual error and usually is - // caused by bad behavior server side or connection issues. The error is - // limited in scope to the call where it happened, the client object is still - // OK to use as long as the connection is still open. - ErrInternalInconsistency = errors.New("internal inconsistency") - // InternalInconsistency alias for ErrInternalInconsistency. - // - // Deprecated: please use ErrInternalInconsistency - InternalInconsistency = ErrInternalInconsistency -) - -// A ClientOption is a function which applies configuration to a Client. -type ClientOption func(*Client) error - -// MaxPacketChecked sets the maximum size of the payload, measured in bytes. -// This option only accepts sizes servers should support, ie. <= 32768 bytes. -// -// If you get the error "failed to send packet header: EOF" when copying a -// large file, try lowering this number. -// -// The default packet size is 32768 bytes. 
-func MaxPacketChecked(size int) ClientOption { - return func(c *Client) error { - if size < 1 { - return errors.New("size must be greater or equal to 1") - } - if size > 32768 { - return errors.New("sizes larger than 32KB might not work with all servers") - } - c.maxPacket = size - return nil - } -} - -// MaxPacketUnchecked sets the maximum size of the payload, measured in bytes. -// It accepts sizes larger than the 32768 bytes all servers should support. -// Only use a setting higher than 32768 if your application always connects to -// the same server or after sufficiently broad testing. -// -// If you get the error "failed to send packet header: EOF" when copying a -// large file, try lowering this number. -// -// The default packet size is 32768 bytes. -func MaxPacketUnchecked(size int) ClientOption { - return func(c *Client) error { - if size < 1 { - return errors.New("size must be greater or equal to 1") - } - c.maxPacket = size - return nil - } -} - -// MaxPacket sets the maximum size of the payload, measured in bytes. -// This option only accepts sizes servers should support, ie. <= 32768 bytes. -// This is a synonym for MaxPacketChecked that provides backward compatibility. -// -// If you get the error "failed to send packet header: EOF" when copying a -// large file, try lowering this number. -// -// The default packet size is 32768 bytes. -func MaxPacket(size int) ClientOption { - return MaxPacketChecked(size) -} - -// MaxConcurrentRequestsPerFile sets the maximum concurrent requests allowed for a single file. -// -// The default maximum concurrent requests is 64. -func MaxConcurrentRequestsPerFile(n int) ClientOption { - return func(c *Client) error { - if n < 1 { - return errors.New("n must be greater or equal to 1") - } - c.maxConcurrentRequests = n - return nil - } -} - -// UseConcurrentWrites allows the Client to perform concurrent Writes. -// -// Using concurrency while doing writes, requires special consideration. 
-// A write to a later offset in a file after an error, -// could end up with a file length longer than what was successfully written. -// -// When using this option, if you receive an error during `io.Copy` or `io.WriteTo`, -// you may need to `Truncate` the target Writer to avoid “holes” in the data written. -func UseConcurrentWrites(value bool) ClientOption { - return func(c *Client) error { - c.useConcurrentWrites = value - return nil - } -} - -// UseConcurrentReads allows the Client to perform concurrent Reads. -// -// Concurrent reads are generally safe to use and not using them will degrade -// performance, so this option is enabled by default. -// -// When enabled, WriteTo will use Stat/Fstat to get the file size and determines -// how many concurrent workers to use. -// Some "read once" servers will delete the file if they receive a stat call on an -// open file and then the download will fail. -// Disabling concurrent reads you will be able to download files from these servers. -// If concurrent reads are disabled, the UseFstat option is ignored. -func UseConcurrentReads(value bool) ClientOption { - return func(c *Client) error { - c.disableConcurrentReads = !value - return nil - } -} - -// UseFstat sets whether to use Fstat or Stat when File.WriteTo is called -// (usually when copying files). -// Some servers limit the amount of open files and calling Stat after opening -// the file will throw an error From the server. Setting this flag will call -// Fstat instead of Stat which is suppose to be called on an open file handle. -// -// It has been found that that with IBM Sterling SFTP servers which have -// "extractability" level set to 1 which means only 1 file can be opened at -// any given time. -// -// If the server you are working with still has an issue with both Stat and -// Fstat calls you can always open a file and read it until the end. 
-// -// Another reason to read the file until its end and Fstat doesn't work is -// that in some servers, reading a full file will automatically delete the -// file as some of these mainframes map the file to a message in a queue. -// Once the file has been read it will get deleted. -func UseFstat(value bool) ClientOption { - return func(c *Client) error { - c.useFstat = value - return nil - } -} - -// Client represents an SFTP session on a *ssh.ClientConn SSH connection. -// Multiple Clients can be active on a single SSH connection, and a Client -// may be called concurrently from multiple Goroutines. -// -// Client implements the github.com/kr/fs.FileSystem interface. -type Client struct { - clientConn - - ext map[string]string // Extensions (name -> data). - - maxPacket int // max packet size read or written. - maxConcurrentRequests int - nextid uint32 - - // write concurrency is… error prone. - // Default behavior should be to not use it. - useConcurrentWrites bool - useFstat bool - disableConcurrentReads bool -} - -// NewClient creates a new SFTP client on conn, using zero or more option -// functions. -func NewClient(conn *ssh.Client, opts ...ClientOption) (*Client, error) { - s, err := conn.NewSession() - if err != nil { - return nil, err - } - if err := s.RequestSubsystem("sftp"); err != nil { - return nil, err - } - pw, err := s.StdinPipe() - if err != nil { - return nil, err - } - pr, err := s.StdoutPipe() - if err != nil { - return nil, err - } - - return NewClientPipe(pr, pw, opts...) -} - -// NewClientPipe creates a new SFTP client given a Reader and a WriteCloser. -// This can be used for connecting to an SFTP server over TCP/TLS or by using -// the system's ssh client program (e.g. via exec.Command). 
-func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Client, error) { - sftp := &Client{ - clientConn: clientConn{ - conn: conn{ - Reader: rd, - WriteCloser: wr, - }, - inflight: make(map[uint32]chan<- result), - closed: make(chan struct{}), - }, - - ext: make(map[string]string), - - maxPacket: 1 << 15, - maxConcurrentRequests: 64, - } - - for _, opt := range opts { - if err := opt(sftp); err != nil { - wr.Close() - return nil, err - } - } - - if err := sftp.sendInit(); err != nil { - wr.Close() - return nil, err - } - if err := sftp.recvVersion(); err != nil { - wr.Close() - return nil, err - } - - sftp.clientConn.wg.Add(1) - go sftp.loop() - - return sftp, nil -} - -// Create creates the named file mode 0666 (before umask), truncating it if it -// already exists. If successful, methods on the returned File can be used for -// I/O; the associated file descriptor has mode O_RDWR. If you need more -// control over the flags/mode used to open the file see client.OpenFile. -// -// Note that some SFTP servers (eg. AWS Transfer) do not support opening files -// read/write at the same time. For those services you will need to use -// `client.OpenFile(os.O_WRONLY|os.O_CREATE|os.O_TRUNC)`. 
-func (c *Client) Create(path string) (*File, error) { - return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC)) -} - -const sftpProtocolVersion = 3 // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 - -func (c *Client) sendInit() error { - return c.clientConn.conn.sendPacket(&sshFxInitPacket{ - Version: sftpProtocolVersion, // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 - }) -} - -// returns the next value of c.nextid -func (c *Client) nextID() uint32 { - return atomic.AddUint32(&c.nextid, 1) -} - -func (c *Client) recvVersion() error { - typ, data, err := c.recvPacket(0) - if err != nil { - return err - } - if typ != sshFxpVersion { - return &unexpectedPacketErr{sshFxpVersion, typ} - } - - version, data, err := unmarshalUint32Safe(data) - if err != nil { - return err - } - if version != sftpProtocolVersion { - return &unexpectedVersionErr{sftpProtocolVersion, version} - } - - for len(data) > 0 { - var ext extensionPair - ext, data, err = unmarshalExtensionPair(data) - if err != nil { - return err - } - c.ext[ext.Name] = ext.Data - } - - return nil -} - -// HasExtension checks whether the server supports a named extension. -// -// The first return value is the extension data reported by the server -// (typically a version number). -func (c *Client) HasExtension(name string) (string, bool) { - data, ok := c.ext[name] - return data, ok -} - -// Walk returns a new Walker rooted at root. -func (c *Client) Walk(root string) *fs.Walker { - return fs.WalkFS(root, c) -} - -// ReadDir reads the directory named by dirname and returns a list of -// directory entries. 
-func (c *Client) ReadDir(p string) ([]os.FileInfo, error) { - handle, err := c.opendir(p) - if err != nil { - return nil, err - } - defer c.close(handle) // this has to defer earlier than the lock below - var attrs []os.FileInfo - var done = false - for !done { - id := c.nextID() - typ, data, err1 := c.sendPacket(nil, &sshFxpReaddirPacket{ - ID: id, - Handle: handle, - }) - if err1 != nil { - err = err1 - done = true - break - } - switch typ { - case sshFxpName: - sid, data := unmarshalUint32(data) - if sid != id { - return nil, &unexpectedIDErr{id, sid} - } - count, data := unmarshalUint32(data) - for i := uint32(0); i < count; i++ { - var filename string - filename, data = unmarshalString(data) - _, data = unmarshalString(data) // discard longname - var attr *FileStat - attr, data = unmarshalAttrs(data) - if filename == "." || filename == ".." { - continue - } - attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename))) - } - case sshFxpStatus: - // TODO(dfc) scope warning! - err = normaliseError(unmarshalStatus(id, data)) - done = true - default: - return nil, unimplementedPacketErr(typ) - } - } - if err == io.EOF { - err = nil - } - return attrs, err -} - -func (c *Client) opendir(path string) (string, error) { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpOpendirPacket{ - ID: id, - Path: path, - }) - if err != nil { - return "", err - } - switch typ { - case sshFxpHandle: - sid, data := unmarshalUint32(data) - if sid != id { - return "", &unexpectedIDErr{id, sid} - } - handle, _ := unmarshalString(data) - return handle, nil - case sshFxpStatus: - return "", normaliseError(unmarshalStatus(id, data)) - default: - return "", unimplementedPacketErr(typ) - } -} - -// Stat returns a FileInfo structure describing the file specified by path 'p'. -// If 'p' is a symbolic link, the returned FileInfo structure describes the referent file. 
-func (c *Client) Stat(p string) (os.FileInfo, error) { - fs, err := c.stat(p) - if err != nil { - return nil, err - } - return fileInfoFromStat(fs, path.Base(p)), nil -} - -// Lstat returns a FileInfo structure describing the file specified by path 'p'. -// If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link. -func (c *Client) Lstat(p string) (os.FileInfo, error) { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpLstatPacket{ - ID: id, - Path: p, - }) - if err != nil { - return nil, err - } - switch typ { - case sshFxpAttrs: - sid, data := unmarshalUint32(data) - if sid != id { - return nil, &unexpectedIDErr{id, sid} - } - attr, _ := unmarshalAttrs(data) - return fileInfoFromStat(attr, path.Base(p)), nil - case sshFxpStatus: - return nil, normaliseError(unmarshalStatus(id, data)) - default: - return nil, unimplementedPacketErr(typ) - } -} - -// ReadLink reads the target of a symbolic link. -func (c *Client) ReadLink(p string) (string, error) { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpReadlinkPacket{ - ID: id, - Path: p, - }) - if err != nil { - return "", err - } - switch typ { - case sshFxpName: - sid, data := unmarshalUint32(data) - if sid != id { - return "", &unexpectedIDErr{id, sid} - } - count, data := unmarshalUint32(data) - if count != 1 { - return "", unexpectedCount(1, count) - } - filename, _ := unmarshalString(data) // ignore dummy attributes - return filename, nil - case sshFxpStatus: - return "", normaliseError(unmarshalStatus(id, data)) - default: - return "", unimplementedPacketErr(typ) - } -} - -// Link creates a hard link at 'newname', pointing at the same inode as 'oldname' -func (c *Client) Link(oldname, newname string) error { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpHardlinkPacket{ - ID: id, - Oldpath: oldname, - Newpath: newname, - }) - if err != nil { - return err - } - switch typ { - case sshFxpStatus: - return normaliseError(unmarshalStatus(id, 
data)) - default: - return unimplementedPacketErr(typ) - } -} - -// Symlink creates a symbolic link at 'newname', pointing at target 'oldname' -func (c *Client) Symlink(oldname, newname string) error { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpSymlinkPacket{ - ID: id, - Linkpath: newname, - Targetpath: oldname, - }) - if err != nil { - return err - } - switch typ { - case sshFxpStatus: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -func (c *Client) setfstat(handle string, flags uint32, attrs interface{}) error { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpFsetstatPacket{ - ID: id, - Handle: handle, - Flags: flags, - Attrs: attrs, - }) - if err != nil { - return err - } - switch typ { - case sshFxpStatus: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -// setstat is a convience wrapper to allow for changing of various parts of the file descriptor. -func (c *Client) setstat(path string, flags uint32, attrs interface{}) error { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpSetstatPacket{ - ID: id, - Path: path, - Flags: flags, - Attrs: attrs, - }) - if err != nil { - return err - } - switch typ { - case sshFxpStatus: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -// Chtimes changes the access and modification times of the named file. -func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error { - type times struct { - Atime uint32 - Mtime uint32 - } - attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())} - return c.setstat(path, sshFileXferAttrACmodTime, attrs) -} - -// Chown changes the user and group owners of the named file. 
-func (c *Client) Chown(path string, uid, gid int) error { - type owner struct { - UID uint32 - GID uint32 - } - attrs := owner{uint32(uid), uint32(gid)} - return c.setstat(path, sshFileXferAttrUIDGID, attrs) -} - -// Chmod changes the permissions of the named file. -// -// Chmod does not apply a umask, because even retrieving the umask is not -// possible in a portable way without causing a race condition. Callers -// should mask off umask bits, if desired. -func (c *Client) Chmod(path string, mode os.FileMode) error { - return c.setstat(path, sshFileXferAttrPermissions, toChmodPerm(mode)) -} - -// Truncate sets the size of the named file. Although it may be safely assumed -// that if the size is less than its current size it will be truncated to fit, -// the SFTP protocol does not specify what behavior the server should do when setting -// size greater than the current size. -func (c *Client) Truncate(path string, size int64) error { - return c.setstat(path, sshFileXferAttrSize, uint64(size)) -} - -// Open opens the named file for reading. If successful, methods on the -// returned file can be used for reading; the associated file descriptor -// has mode O_RDONLY. -func (c *Client) Open(path string) (*File, error) { - return c.open(path, flags(os.O_RDONLY)) -} - -// OpenFile is the generalized open call; most users will use Open or -// Create instead. It opens the named file with specified flag (O_RDONLY -// etc.). If successful, methods on the returned File can be used for I/O. 
-func (c *Client) OpenFile(path string, f int) (*File, error) { - return c.open(path, flags(f)) -} - -func (c *Client) open(path string, pflags uint32) (*File, error) { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpOpenPacket{ - ID: id, - Path: path, - Pflags: pflags, - }) - if err != nil { - return nil, err - } - switch typ { - case sshFxpHandle: - sid, data := unmarshalUint32(data) - if sid != id { - return nil, &unexpectedIDErr{id, sid} - } - handle, _ := unmarshalString(data) - return &File{c: c, path: path, handle: handle}, nil - case sshFxpStatus: - return nil, normaliseError(unmarshalStatus(id, data)) - default: - return nil, unimplementedPacketErr(typ) - } -} - -// close closes a handle handle previously returned in the response -// to SSH_FXP_OPEN or SSH_FXP_OPENDIR. The handle becomes invalid -// immediately after this request has been sent. -func (c *Client) close(handle string) error { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpClosePacket{ - ID: id, - Handle: handle, - }) - if err != nil { - return err - } - switch typ { - case sshFxpStatus: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -func (c *Client) stat(path string) (*FileStat, error) { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpStatPacket{ - ID: id, - Path: path, - }) - if err != nil { - return nil, err - } - switch typ { - case sshFxpAttrs: - sid, data := unmarshalUint32(data) - if sid != id { - return nil, &unexpectedIDErr{id, sid} - } - attr, _ := unmarshalAttrs(data) - return attr, nil - case sshFxpStatus: - return nil, normaliseError(unmarshalStatus(id, data)) - default: - return nil, unimplementedPacketErr(typ) - } -} - -func (c *Client) fstat(handle string) (*FileStat, error) { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpFstatPacket{ - ID: id, - Handle: handle, - }) - if err != nil { - return nil, err - } - switch typ { - case sshFxpAttrs: - sid, 
data := unmarshalUint32(data) - if sid != id { - return nil, &unexpectedIDErr{id, sid} - } - attr, _ := unmarshalAttrs(data) - return attr, nil - case sshFxpStatus: - return nil, normaliseError(unmarshalStatus(id, data)) - default: - return nil, unimplementedPacketErr(typ) - } -} - -// StatVFS retrieves VFS statistics from a remote host. -// -// It implements the statvfs@openssh.com SSH_FXP_EXTENDED feature -// from http://www.opensource.apple.com/source/OpenSSH/OpenSSH-175/openssh/PROTOCOL?txt. -func (c *Client) StatVFS(path string) (*StatVFS, error) { - // send the StatVFS packet to the server - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpStatvfsPacket{ - ID: id, - Path: path, - }) - if err != nil { - return nil, err - } - - switch typ { - // server responded with valid data - case sshFxpExtendedReply: - var response StatVFS - err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response) - if err != nil { - return nil, errors.New("can not parse reply") - } - - return &response, nil - - // the resquest failed - case sshFxpStatus: - return nil, normaliseError(unmarshalStatus(id, data)) - - default: - return nil, unimplementedPacketErr(typ) - } -} - -// Join joins any number of path elements into a single path, adding a -// separating slash if necessary. The result is Cleaned; in particular, all -// empty strings are ignored. -func (c *Client) Join(elem ...string) string { return path.Join(elem...) } - -// Remove removes the specified file or directory. An error will be returned if no -// file or directory with the specified path exists, or if the specified directory -// is not empty. -func (c *Client) Remove(path string) error { - err := c.removeFile(path) - // some servers, *cough* osx *cough*, return EPERM, not ENODIR. 
- // serv-u returns ssh_FX_FILE_IS_A_DIRECTORY - // EPERM is converted to os.ErrPermission so it is not a StatusError - if err, ok := err.(*StatusError); ok { - switch err.Code { - case sshFxFailure, sshFxFileIsADirectory: - return c.RemoveDirectory(path) - } - } - if os.IsPermission(err) { - return c.RemoveDirectory(path) - } - return err -} - -func (c *Client) removeFile(path string) error { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpRemovePacket{ - ID: id, - Filename: path, - }) - if err != nil { - return err - } - switch typ { - case sshFxpStatus: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -// RemoveDirectory removes a directory path. -func (c *Client) RemoveDirectory(path string) error { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpRmdirPacket{ - ID: id, - Path: path, - }) - if err != nil { - return err - } - switch typ { - case sshFxpStatus: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -// Rename renames a file. -func (c *Client) Rename(oldname, newname string) error { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpRenamePacket{ - ID: id, - Oldpath: oldname, - Newpath: newname, - }) - if err != nil { - return err - } - switch typ { - case sshFxpStatus: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -// PosixRename renames a file using the posix-rename@openssh.com extension -// which will replace newname if it already exists. 
-func (c *Client) PosixRename(oldname, newname string) error { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpPosixRenamePacket{ - ID: id, - Oldpath: oldname, - Newpath: newname, - }) - if err != nil { - return err - } - switch typ { - case sshFxpStatus: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -// RealPath can be used to have the server canonicalize any given path name to an absolute path. -// -// This is useful for converting path names containing ".." components, -// or relative pathnames without a leading slash into absolute paths. -func (c *Client) RealPath(path string) (string, error) { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpRealpathPacket{ - ID: id, - Path: path, - }) - if err != nil { - return "", err - } - switch typ { - case sshFxpName: - sid, data := unmarshalUint32(data) - if sid != id { - return "", &unexpectedIDErr{id, sid} - } - count, data := unmarshalUint32(data) - if count != 1 { - return "", unexpectedCount(1, count) - } - filename, _ := unmarshalString(data) // ignore attributes - return filename, nil - case sshFxpStatus: - return "", normaliseError(unmarshalStatus(id, data)) - default: - return "", unimplementedPacketErr(typ) - } -} - -// Getwd returns the current working directory of the server. Operations -// involving relative paths will be based at this location. -func (c *Client) Getwd() (string, error) { - return c.RealPath(".") -} - -// Mkdir creates the specified directory. An error will be returned if a file or -// directory with the specified path already exists, or if the directory's -// parent folder does not exist (the method cannot create complete paths). 
-func (c *Client) Mkdir(path string) error { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpMkdirPacket{ - ID: id, - Path: path, - }) - if err != nil { - return err - } - switch typ { - case sshFxpStatus: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -// MkdirAll creates a directory named path, along with any necessary parents, -// and returns nil, or else returns an error. -// If path is already a directory, MkdirAll does nothing and returns nil. -// If path contains a regular file, an error is returned -func (c *Client) MkdirAll(path string) error { - // Most of this code mimics https://golang.org/src/os/path.go?s=514:561#L13 - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := c.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && path[i-1] == '/' { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && path[j-1] != '/' { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = c.MkdirAll(path[0 : j-1]) - if err != nil { - return err - } - } - - // Parent now exists; invoke Mkdir and use its result. - err = c.Mkdir(path) - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := c.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// File represents a remote file. -type File struct { - c *Client - path string - handle string - - mu sync.Mutex - offset int64 // current offset within remote file -} - -// Close closes the File, rendering it unusable for I/O. It returns an -// error, if any. 
-func (f *File) Close() error { - return f.c.close(f.handle) -} - -// Name returns the name of the file as presented to Open or Create. -func (f *File) Name() string { - return f.path -} - -// Read reads up to len(b) bytes from the File. It returns the number of bytes -// read and an error, if any. Read follows io.Reader semantics, so when Read -// encounters an error or EOF condition after successfully reading n > 0 bytes, -// it returns the number of bytes read. -// -// To maximise throughput for transferring the entire file (especially -// over high latency links) it is recommended to use WriteTo rather -// than calling Read multiple times. io.Copy will do this -// automatically. -func (f *File) Read(b []byte) (int, error) { - f.mu.Lock() - defer f.mu.Unlock() - - n, err := f.ReadAt(b, f.offset) - f.offset += int64(n) - return n, err -} - -// readChunkAt attempts to read the whole entire length of the buffer from the file starting at the offset. -// It will continue progressively reading into the buffer until it fills the whole buffer, or an error occurs. 
-func (f *File) readChunkAt(ch chan result, b []byte, off int64) (n int, err error) { - for err == nil && n < len(b) { - id := f.c.nextID() - typ, data, err := f.c.sendPacket(ch, &sshFxpReadPacket{ - ID: id, - Handle: f.handle, - Offset: uint64(off) + uint64(n), - Len: uint32(len(b) - n), - }) - if err != nil { - return n, err - } - - switch typ { - case sshFxpStatus: - return n, normaliseError(unmarshalStatus(id, data)) - - case sshFxpData: - sid, data := unmarshalUint32(data) - if id != sid { - return n, &unexpectedIDErr{id, sid} - } - - l, data := unmarshalUint32(data) - n += copy(b[n:], data[:l]) - - default: - return n, unimplementedPacketErr(typ) - } - } - - return -} - -func (f *File) readAtSequential(b []byte, off int64) (read int, err error) { - for read < len(b) { - rb := b[read:] - if len(rb) > f.c.maxPacket { - rb = rb[:f.c.maxPacket] - } - n, err := f.readChunkAt(nil, rb, off+int64(read)) - if n < 0 { - panic("sftp.File: returned negative count from readChunkAt") - } - if n > 0 { - read += n - } - if err != nil { - return read, err - } - } - return read, nil -} - -// ReadAt reads up to len(b) byte from the File at a given offset `off`. It returns -// the number of bytes read and an error, if any. ReadAt follows io.ReaderAt semantics, -// so the file offset is not altered during the read. -func (f *File) ReadAt(b []byte, off int64) (int, error) { - if len(b) <= f.c.maxPacket { - // This should be able to be serviced with 1/2 requests. - // So, just do it directly. - return f.readChunkAt(nil, b, off) - } - - if f.c.disableConcurrentReads { - return f.readAtSequential(b, off) - } - - // Split the read into multiple maxPacket-sized concurrent reads bounded by maxConcurrentRequests. - // This allows writes with a suitably large buffer to transfer data at a much faster rate - // by overlapping round trip times. 
- - cancel := make(chan struct{}) - - concurrency := len(b)/f.c.maxPacket + 1 - if concurrency > f.c.maxConcurrentRequests || concurrency < 1 { - concurrency = f.c.maxConcurrentRequests - } - - resPool := newResChanPool(concurrency) - - type work struct { - id uint32 - res chan result - - b []byte - off int64 - } - workCh := make(chan work) - - // Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets. - go func() { - defer close(workCh) - - b := b - offset := off - chunkSize := f.c.maxPacket - - for len(b) > 0 { - rb := b - if len(rb) > chunkSize { - rb = rb[:chunkSize] - } - - id := f.c.nextID() - res := resPool.Get() - - f.c.dispatchRequest(res, &sshFxpReadPacket{ - ID: id, - Handle: f.handle, - Offset: uint64(offset), - Len: uint32(chunkSize), - }) - - select { - case workCh <- work{id, res, rb, offset}: - case <-cancel: - return - } - - offset += int64(len(rb)) - b = b[len(rb):] - } - }() - - type rErr struct { - off int64 - err error - } - errCh := make(chan rErr) - - var wg sync.WaitGroup - wg.Add(concurrency) - for i := 0; i < concurrency; i++ { - // Map_i: each worker gets work, and then performs the Read into its buffer from its respective offset. - go func() { - defer wg.Done() - - for packet := range workCh { - var n int - - s := <-packet.res - resPool.Put(packet.res) - - err := s.err - if err == nil { - switch s.typ { - case sshFxpStatus: - err = normaliseError(unmarshalStatus(packet.id, s.data)) - - case sshFxpData: - sid, data := unmarshalUint32(s.data) - if packet.id != sid { - err = &unexpectedIDErr{packet.id, sid} - - } else { - l, data := unmarshalUint32(data) - n = copy(packet.b, data[:l]) - - // For normal disk files, it is guaranteed that this will read - // the specified number of bytes, or up to end of file. - // This implies, if we have a short read, that means EOF. 
- if n < len(packet.b) { - err = io.EOF - } - } - - default: - err = unimplementedPacketErr(s.typ) - } - } - - if err != nil { - // return the offset as the start + how much we read before the error. - errCh <- rErr{packet.off + int64(n), err} - return - } - } - }() - } - - // Wait for long tail, before closing results. - go func() { - wg.Wait() - close(errCh) - }() - - // Reduce: collect all the results into a relevant return: the earliest offset to return an error. - firstErr := rErr{math.MaxInt64, nil} - for rErr := range errCh { - if rErr.off <= firstErr.off { - firstErr = rErr - } - - select { - case <-cancel: - default: - // stop any more work from being distributed. (Just in case.) - close(cancel) - } - } - - if firstErr.err != nil { - // firstErr.err != nil if and only if firstErr.off > our starting offset. - return int(firstErr.off - off), firstErr.err - } - - // As per spec for io.ReaderAt, we return nil error if and only if we read everything. - return len(b), nil -} - -// writeToSequential implements WriteTo, but works sequentially with no parallelism. -func (f *File) writeToSequential(w io.Writer) (written int64, err error) { - b := make([]byte, f.c.maxPacket) - ch := make(chan result, 1) // reusable channel - - for { - n, err := f.readChunkAt(ch, b, f.offset) - if n < 0 { - panic("sftp.File: returned negative count from readChunkAt") - } - - if n > 0 { - f.offset += int64(n) - - m, err := w.Write(b[:n]) - written += int64(m) - - if err != nil { - return written, err - } - } - - if err != nil { - if err == io.EOF { - return written, nil // return nil explicitly. - } - - return written, err - } - } -} - -// WriteTo writes the file to the given Writer. -// The return value is the number of bytes written. -// Any error encountered during the write is also returned. -// -// This method is preferred over calling Read multiple times -// to maximise throughput for transferring the entire file, -// especially over high latency links. 
-func (f *File) WriteTo(w io.Writer) (written int64, err error) { - f.mu.Lock() - defer f.mu.Unlock() - - if f.c.disableConcurrentReads { - return f.writeToSequential(w) - } - - // For concurrency, we want to guess how many concurrent workers we should use. - var fileStat *FileStat - if f.c.useFstat { - fileStat, err = f.c.fstat(f.handle) - } else { - fileStat, err = f.c.stat(f.path) - } - if err != nil { - return 0, err - } - - fileSize := fileStat.Size - if fileSize <= uint64(f.c.maxPacket) || !isRegular(fileStat.Mode) { - // only regular files are guaranteed to return (full read) xor (partial read, next error) - return f.writeToSequential(w) - } - - concurrency64 := fileSize/uint64(f.c.maxPacket) + 1 // a bad guess, but better than no guess - if concurrency64 > uint64(f.c.maxConcurrentRequests) || concurrency64 < 1 { - concurrency64 = uint64(f.c.maxConcurrentRequests) - } - // Now that concurrency64 is saturated to an int value, we know this assignment cannot possibly overflow. - concurrency := int(concurrency64) - - chunkSize := f.c.maxPacket - pool := newBufPool(concurrency, chunkSize) - resPool := newResChanPool(concurrency) - - cancel := make(chan struct{}) - var wg sync.WaitGroup - defer func() { - // Once the writing Reduce phase has ended, all the feed work needs to unconditionally stop. - close(cancel) - - // We want to wait until all outstanding goroutines with an `f` or `f.c` reference have completed. - // Just to be sure we don’t orphan any goroutines any hanging references. - wg.Wait() - }() - - type writeWork struct { - b []byte - off int64 - err error - - next chan writeWork - } - writeCh := make(chan writeWork) - - type readWork struct { - id uint32 - res chan result - off int64 - - cur, next chan writeWork - } - readCh := make(chan readWork) - - // Slice: hand out chunks of work on demand, with a `cur` and `next` channel built-in for sequencing. 
- go func() { - defer close(readCh) - - off := f.offset - - cur := writeCh - for { - id := f.c.nextID() - res := resPool.Get() - - next := make(chan writeWork) - readWork := readWork{ - id: id, - res: res, - off: off, - - cur: cur, - next: next, - } - - f.c.dispatchRequest(res, &sshFxpReadPacket{ - ID: id, - Handle: f.handle, - Offset: uint64(off), - Len: uint32(chunkSize), - }) - - select { - case readCh <- readWork: - case <-cancel: - return - } - - off += int64(chunkSize) - cur = next - } - }() - - wg.Add(concurrency) - for i := 0; i < concurrency; i++ { - // Map_i: each worker gets readWork, and does the Read into a buffer at the given offset. - go func() { - defer wg.Done() - - for readWork := range readCh { - var b []byte - var n int - - s := <-readWork.res - resPool.Put(readWork.res) - - err := s.err - if err == nil { - switch s.typ { - case sshFxpStatus: - err = normaliseError(unmarshalStatus(readWork.id, s.data)) - - case sshFxpData: - sid, data := unmarshalUint32(s.data) - if readWork.id != sid { - err = &unexpectedIDErr{readWork.id, sid} - - } else { - l, data := unmarshalUint32(data) - b = pool.Get()[:l] - n = copy(b, data[:l]) - b = b[:n] - } - - default: - err = unimplementedPacketErr(s.typ) - } - } - - writeWork := writeWork{ - b: b, - off: readWork.off, - err: err, - - next: readWork.next, - } - - select { - case readWork.cur <- writeWork: - case <-cancel: - return - } - - if err != nil { - return - } - } - }() - } - - // Reduce: serialize the results from the reads into sequential writes. - cur := writeCh - for { - packet, ok := <-cur - if !ok { - return written, errors.New("sftp.File.WriteTo: unexpectedly closed channel") - } - - // Because writes are serialized, this will always be the last successfully read byte. 
- f.offset = packet.off + int64(len(packet.b)) - - if len(packet.b) > 0 { - n, err := w.Write(packet.b) - written += int64(n) - if err != nil { - return written, err - } - } - - if packet.err != nil { - if packet.err == io.EOF { - return written, nil - } - - return written, packet.err - } - - pool.Put(packet.b) - cur = packet.next - } -} - -// Stat returns the FileInfo structure describing file. If there is an -// error. -func (f *File) Stat() (os.FileInfo, error) { - fs, err := f.c.fstat(f.handle) - if err != nil { - return nil, err - } - return fileInfoFromStat(fs, path.Base(f.path)), nil -} - -// Write writes len(b) bytes to the File. It returns the number of bytes -// written and an error, if any. Write returns a non-nil error when n != -// len(b). -// -// To maximise throughput for transferring the entire file (especially -// over high latency links) it is recommended to use ReadFrom rather -// than calling Write multiple times. io.Copy will do this -// automatically. -func (f *File) Write(b []byte) (int, error) { - f.mu.Lock() - defer f.mu.Unlock() - - n, err := f.WriteAt(b, f.offset) - f.offset += int64(n) - return n, err -} - -func (f *File) writeChunkAt(ch chan result, b []byte, off int64) (int, error) { - typ, data, err := f.c.sendPacket(ch, &sshFxpWritePacket{ - ID: f.c.nextID(), - Handle: f.handle, - Offset: uint64(off), - Length: uint32(len(b)), - Data: b, - }) - if err != nil { - return 0, err - } - - switch typ { - case sshFxpStatus: - id, _ := unmarshalUint32(data) - err := normaliseError(unmarshalStatus(id, data)) - if err != nil { - return 0, err - } - - default: - return 0, unimplementedPacketErr(typ) - } - - return len(b), nil -} - -// writeAtConcurrent implements WriterAt, but works concurrently rather than sequentially. -func (f *File) writeAtConcurrent(b []byte, off int64) (int, error) { - // Split the write into multiple maxPacket sized concurrent writes - // bounded by maxConcurrentRequests. 
This allows writes with a suitably - // large buffer to transfer data at a much faster rate due to - // overlapping round trip times. - - cancel := make(chan struct{}) - - type work struct { - id uint32 - res chan result - - off int64 - } - workCh := make(chan work) - - concurrency := len(b)/f.c.maxPacket + 1 - if concurrency > f.c.maxConcurrentRequests || concurrency < 1 { - concurrency = f.c.maxConcurrentRequests - } - - pool := newResChanPool(concurrency) - - // Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets. - go func() { - defer close(workCh) - - var read int - chunkSize := f.c.maxPacket - - for read < len(b) { - wb := b[read:] - if len(wb) > chunkSize { - wb = wb[:chunkSize] - } - - id := f.c.nextID() - res := pool.Get() - off := off + int64(read) - - f.c.dispatchRequest(res, &sshFxpWritePacket{ - ID: id, - Handle: f.handle, - Offset: uint64(off), - Length: uint32(len(wb)), - Data: wb, - }) - - select { - case workCh <- work{id, res, off}: - case <-cancel: - return - } - - read += len(wb) - } - }() - - type wErr struct { - off int64 - err error - } - errCh := make(chan wErr) - - var wg sync.WaitGroup - wg.Add(concurrency) - for i := 0; i < concurrency; i++ { - // Map_i: each worker gets work, and does the Write from each buffer to its respective offset. - go func() { - defer wg.Done() - - for work := range workCh { - s := <-work.res - pool.Put(work.res) - - err := s.err - if err == nil { - switch s.typ { - case sshFxpStatus: - err = normaliseError(unmarshalStatus(work.id, s.data)) - default: - err = unimplementedPacketErr(s.typ) - } - } - - if err != nil { - errCh <- wErr{work.off, err} - } - } - }() - } - - // Wait for long tail, before closing results. - go func() { - wg.Wait() - close(errCh) - }() - - // Reduce: collect all the results into a relevant return: the earliest offset to return an error. 
- firstErr := wErr{math.MaxInt64, nil} - for wErr := range errCh { - if wErr.off <= firstErr.off { - firstErr = wErr - } - - select { - case <-cancel: - default: - // stop any more work from being distributed. (Just in case.) - close(cancel) - } - } - - if firstErr.err != nil { - // firstErr.err != nil if and only if firstErr.off >= our starting offset. - return int(firstErr.off - off), firstErr.err - } - - return len(b), nil -} - -// WriteAt writes up to len(b) byte to the File at a given offset `off`. It returns -// the number of bytes written and an error, if any. WriteAt follows io.WriterAt semantics, -// so the file offset is not altered during the write. -func (f *File) WriteAt(b []byte, off int64) (written int, err error) { - if len(b) <= f.c.maxPacket { - // We can do this in one write. - return f.writeChunkAt(nil, b, off) - } - - if f.c.useConcurrentWrites { - return f.writeAtConcurrent(b, off) - } - - ch := make(chan result, 1) // reusable channel - - chunkSize := f.c.maxPacket - - for written < len(b) { - wb := b[written:] - if len(wb) > chunkSize { - wb = wb[:chunkSize] - } - - n, err := f.writeChunkAt(ch, wb, off+int64(written)) - if n > 0 { - written += n - } - - if err != nil { - return written, err - } - } - - return len(b), nil -} - -// ReadFromWithConcurrency implements ReaderFrom, -// but uses the given concurrency to issue multiple requests at the same time. -// -// Giving a concurrency of less than one will default to the Client’s max concurrency. -// -// Otherwise, the given concurrency will be capped by the Client's max concurrency. -func (f *File) ReadFromWithConcurrency(r io.Reader, concurrency int) (read int64, err error) { - // Split the write into multiple maxPacket sized concurrent writes. - // This allows writes with a suitably large reader - // to transfer data at a much faster rate due to overlapping round trip times. 
- - cancel := make(chan struct{}) - - type work struct { - id uint32 - res chan result - - off int64 - } - workCh := make(chan work) - - type rwErr struct { - off int64 - err error - } - errCh := make(chan rwErr) - - if concurrency > f.c.maxConcurrentRequests || concurrency < 1 { - concurrency = f.c.maxConcurrentRequests - } - - pool := newResChanPool(concurrency) - - // Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets. - go func() { - defer close(workCh) - - b := make([]byte, f.c.maxPacket) - off := f.offset - - for { - n, err := r.Read(b) - - if n > 0 { - read += int64(n) - - id := f.c.nextID() - res := pool.Get() - - f.c.dispatchRequest(res, &sshFxpWritePacket{ - ID: id, - Handle: f.handle, - Offset: uint64(off), - Length: uint32(n), - Data: b, - }) - - select { - case workCh <- work{id, res, off}: - case <-cancel: - return - } - - off += int64(n) - } - - if err != nil { - if err != io.EOF { - errCh <- rwErr{off, err} - } - return - } - } - }() - - var wg sync.WaitGroup - wg.Add(concurrency) - for i := 0; i < concurrency; i++ { - // Map_i: each worker gets work, and does the Write from each buffer to its respective offset. - go func() { - defer wg.Done() - - for work := range workCh { - s := <-work.res - pool.Put(work.res) - - err := s.err - if err == nil { - switch s.typ { - case sshFxpStatus: - err = normaliseError(unmarshalStatus(work.id, s.data)) - default: - err = unimplementedPacketErr(s.typ) - } - } - - if err != nil { - errCh <- rwErr{work.off, err} - } - } - }() - } - - // Wait for long tail, before closing results. - go func() { - wg.Wait() - close(errCh) - }() - - // Reduce: Collect all the results into a relevant return: the earliest offset to return an error. - firstErr := rwErr{math.MaxInt64, nil} - for rwErr := range errCh { - if rwErr.off <= firstErr.off { - firstErr = rwErr - } - - select { - case <-cancel: - default: - // stop any more work from being distributed. 
- close(cancel) - } - } - - if firstErr.err != nil { - // firstErr.err != nil if and only if firstErr.off is a valid offset. - // - // firstErr.off will then be the lesser of: - // * the offset of the first error from writing, - // * the last successfully read offset. - // - // This could be less than the last successfully written offset, - // which is the whole reason for the UseConcurrentWrites() ClientOption. - // - // Callers are responsible for truncating any SFTP files to a safe length. - f.offset = firstErr.off - - // ReadFrom is defined to return the read bytes, regardless of any writer errors. - return read, firstErr.err - } - - f.offset += read - return read, nil -} - -// ReadFrom reads data from r until EOF and writes it to the file. The return -// value is the number of bytes read. Any error except io.EOF encountered -// during the read is also returned. -// -// This method is preferred over calling Write multiple times -// to maximise throughput for transferring the entire file, -// especially over high-latency links. -func (f *File) ReadFrom(r io.Reader) (int64, error) { - f.mu.Lock() - defer f.mu.Unlock() - - if f.c.useConcurrentWrites { - var remain int64 - switch r := r.(type) { - case interface{ Len() int }: - remain = int64(r.Len()) - - case interface{ Size() int64 }: - remain = r.Size() - - case *io.LimitedReader: - remain = r.N - - case interface{ Stat() (os.FileInfo, error) }: - info, err := r.Stat() - if err == nil { - remain = info.Size() - } - } - - if remain < 0 { - // We can strongly assert that we want default max concurrency here. - return f.ReadFromWithConcurrency(r, f.c.maxConcurrentRequests) - } - - if remain > int64(f.c.maxPacket) { - // Otherwise, only use concurrency, if it would be at least two packets. - - // This is the best reasonable guess we can make. - concurrency64 := remain/int64(f.c.maxPacket) + 1 - - // We need to cap this value to an `int` size value to avoid overflow on 32-bit machines. 
- // So, we may as well pre-cap it to `f.c.maxConcurrentRequests`. - if concurrency64 > int64(f.c.maxConcurrentRequests) { - concurrency64 = int64(f.c.maxConcurrentRequests) - } - - return f.ReadFromWithConcurrency(r, int(concurrency64)) - } - } - - ch := make(chan result, 1) // reusable channel - - b := make([]byte, f.c.maxPacket) - - var read int64 - for { - n, err := r.Read(b) - if n < 0 { - panic("sftp.File: reader returned negative count from Read") - } - - if n > 0 { - read += int64(n) - - m, err2 := f.writeChunkAt(ch, b[:n], f.offset) - f.offset += int64(m) - - if err == nil { - err = err2 - } - } - - if err != nil { - if err == io.EOF { - return read, nil // return nil explicitly. - } - - return read, err - } - } -} - -// Seek implements io.Seeker by setting the client offset for the next Read or -// Write. It returns the next offset read. Seeking before or after the end of -// the file is undefined. Seeking relative to the end calls Stat. -func (f *File) Seek(offset int64, whence int) (int64, error) { - f.mu.Lock() - defer f.mu.Unlock() - - switch whence { - case io.SeekStart: - case io.SeekCurrent: - offset += f.offset - case io.SeekEnd: - fi, err := f.Stat() - if err != nil { - return f.offset, err - } - offset += fi.Size() - default: - return f.offset, unimplementedSeekWhence(whence) - } - - if offset < 0 { - return f.offset, os.ErrInvalid - } - - f.offset = offset - return f.offset, nil -} - -// Chown changes the uid/gid of the current file. -func (f *File) Chown(uid, gid int) error { - return f.c.Chown(f.path, uid, gid) -} - -// Chmod changes the permissions of the current file. -// -// See Client.Chmod for details. -func (f *File) Chmod(mode os.FileMode) error { - return f.c.setfstat(f.handle, sshFileXferAttrPermissions, toChmodPerm(mode)) -} - -// Sync requests a flush of the contents of a File to stable storage. -// -// Sync requires the server to support the fsync@openssh.com extension. 
-func (f *File) Sync() error { - id := f.c.nextID() - typ, data, err := f.c.sendPacket(nil, &sshFxpFsyncPacket{ - ID: id, - Handle: f.handle, - }) - - switch { - case err != nil: - return err - case typ == sshFxpStatus: - return normaliseError(unmarshalStatus(id, data)) - default: - return &unexpectedPacketErr{want: sshFxpStatus, got: typ} - } -} - -// Truncate sets the size of the current file. Although it may be safely assumed -// that if the size is less than its current size it will be truncated to fit, -// the SFTP protocol does not specify what behavior the server should do when setting -// size greater than the current size. -// We send a SSH_FXP_FSETSTAT here since we have a file handle -func (f *File) Truncate(size int64) error { - return f.c.setfstat(f.handle, sshFileXferAttrSize, uint64(size)) -} - -// normaliseError normalises an error into a more standard form that can be -// checked against stdlib errors like io.EOF or os.ErrNotExist. -func normaliseError(err error) error { - switch err := err.(type) { - case *StatusError: - switch err.Code { - case sshFxEOF: - return io.EOF - case sshFxNoSuchFile: - return os.ErrNotExist - case sshFxPermissionDenied: - return os.ErrPermission - case sshFxOk: - return nil - default: - return err - } - default: - return err - } -} - -// flags converts the flags passed to OpenFile into ssh flags. -// Unsupported flags are ignored. -func flags(f int) uint32 { - var out uint32 - switch f & os.O_WRONLY { - case os.O_WRONLY: - out |= sshFxfWrite - case os.O_RDONLY: - out |= sshFxfRead - } - if f&os.O_RDWR == os.O_RDWR { - out |= sshFxfRead | sshFxfWrite - } - if f&os.O_APPEND == os.O_APPEND { - out |= sshFxfAppend - } - if f&os.O_CREATE == os.O_CREATE { - out |= sshFxfCreat - } - if f&os.O_TRUNC == os.O_TRUNC { - out |= sshFxfTrunc - } - if f&os.O_EXCL == os.O_EXCL { - out |= sshFxfExcl - } - return out -} - -// toChmodPerm converts Go permission bits to POSIX permission bits. 
-// -// This differs from fromFileMode in that we preserve the POSIX versions of -// setuid, setgid and sticky in m, because we've historically supported those -// bits, and we mask off any non-permission bits. -func toChmodPerm(m os.FileMode) (perm uint32) { - const mask = os.ModePerm | s_ISUID | s_ISGID | s_ISVTX - perm = uint32(m & mask) - - if m&os.ModeSetuid != 0 { - perm |= s_ISUID - } - if m&os.ModeSetgid != 0 { - perm |= s_ISGID - } - if m&os.ModeSticky != 0 { - perm |= s_ISVTX - } - - return perm -} diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go deleted file mode 100644 index 7d951423..00000000 --- a/vendor/github.com/pkg/sftp/conn.go +++ /dev/null @@ -1,189 +0,0 @@ -package sftp - -import ( - "encoding" - "fmt" - "io" - "sync" -) - -// conn implements a bidirectional channel on which client and server -// connections are multiplexed. -type conn struct { - io.Reader - io.WriteCloser - // this is the same allocator used in packet manager - alloc *allocator - sync.Mutex // used to serialise writes to sendPacket -} - -// the orderID is used in server mode if the allocator is enabled. -// For the client mode just pass 0 -func (c *conn) recvPacket(orderID uint32) (uint8, []byte, error) { - return recvPacket(c, c.alloc, orderID) -} - -func (c *conn) sendPacket(m encoding.BinaryMarshaler) error { - c.Lock() - defer c.Unlock() - - return sendPacket(c, m) -} - -func (c *conn) Close() error { - c.Lock() - defer c.Unlock() - return c.WriteCloser.Close() -} - -type clientConn struct { - conn - wg sync.WaitGroup - - sync.Mutex // protects inflight - inflight map[uint32]chan<- result // outstanding requests - - closed chan struct{} - err error -} - -// Wait blocks until the conn has shut down, and return the error -// causing the shutdown. It can be called concurrently from multiple -// goroutines. -func (c *clientConn) Wait() error { - <-c.closed - return c.err -} - -// Close closes the SFTP session. 
-func (c *clientConn) Close() error { - defer c.wg.Wait() - return c.conn.Close() -} - -func (c *clientConn) loop() { - defer c.wg.Done() - err := c.recv() - if err != nil { - c.broadcastErr(err) - } -} - -// recv continuously reads from the server and forwards responses to the -// appropriate channel. -func (c *clientConn) recv() error { - defer c.conn.Close() - - for { - typ, data, err := c.recvPacket(0) - if err != nil { - return err - } - sid, _, err := unmarshalUint32Safe(data) - if err != nil { - return err - } - - ch, ok := c.getChannel(sid) - if !ok { - // This is an unexpected occurrence. Send the error - // back to all listeners so that they terminate - // gracefully. - return fmt.Errorf("sid not found: %d", sid) - } - - ch <- result{typ: typ, data: data} - } -} - -func (c *clientConn) putChannel(ch chan<- result, sid uint32) bool { - c.Lock() - defer c.Unlock() - - select { - case <-c.closed: - // already closed with broadcastErr, return error on chan. - ch <- result{err: ErrSSHFxConnectionLost} - return false - default: - } - - c.inflight[sid] = ch - return true -} - -func (c *clientConn) getChannel(sid uint32) (chan<- result, bool) { - c.Lock() - defer c.Unlock() - - ch, ok := c.inflight[sid] - delete(c.inflight, sid) - - return ch, ok -} - -// result captures the result of receiving the a packet from the server -type result struct { - typ byte - data []byte - err error -} - -type idmarshaler interface { - id() uint32 - encoding.BinaryMarshaler -} - -func (c *clientConn) sendPacket(ch chan result, p idmarshaler) (byte, []byte, error) { - if cap(ch) < 1 { - ch = make(chan result, 1) - } - - c.dispatchRequest(ch, p) - s := <-ch - return s.typ, s.data, s.err -} - -// dispatchRequest should ideally only be called by race-detection tests outside of this file, -// where you have to ensure two packets are in flight sequentially after each other. 
-func (c *clientConn) dispatchRequest(ch chan<- result, p idmarshaler) { - sid := p.id() - - if !c.putChannel(ch, sid) { - // already closed. - return - } - - if err := c.conn.sendPacket(p); err != nil { - if ch, ok := c.getChannel(sid); ok { - ch <- result{err: err} - } - } -} - -// broadcastErr sends an error to all goroutines waiting for a response. -func (c *clientConn) broadcastErr(err error) { - c.Lock() - defer c.Unlock() - - bcastRes := result{err: ErrSSHFxConnectionLost} - for sid, ch := range c.inflight { - ch <- bcastRes - - // Replace the chan in inflight, - // we have hijacked this chan, - // and this guarantees always-only-once sending. - c.inflight[sid] = make(chan<- result, 1) - } - - c.err = err - close(c.closed) -} - -type serverConn struct { - conn -} - -func (s *serverConn) sendError(id uint32, err error) error { - return s.sendPacket(statusFromError(id, err)) -} diff --git a/vendor/github.com/pkg/sftp/debug.go b/vendor/github.com/pkg/sftp/debug.go deleted file mode 100644 index 3e264abe..00000000 --- a/vendor/github.com/pkg/sftp/debug.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build debug - -package sftp - -import "log" - -func debug(fmt string, args ...interface{}) { - log.Printf(fmt, args...) 
-} diff --git a/vendor/github.com/pkg/sftp/fuzz.go b/vendor/github.com/pkg/sftp/fuzz.go deleted file mode 100644 index 169aebc2..00000000 --- a/vendor/github.com/pkg/sftp/fuzz.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build gofuzz - -package sftp - -import "bytes" - -type sinkfuzz struct{} - -func (*sinkfuzz) Close() error { return nil } -func (*sinkfuzz) Write(p []byte) (int, error) { return len(p), nil } - -var devnull = &sinkfuzz{} - -// To run: go-fuzz-build && go-fuzz -func Fuzz(data []byte) int { - c, err := NewClientPipe(bytes.NewReader(data), devnull) - if err != nil { - return 0 - } - c.Close() - return 1 -} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go deleted file mode 100644 index eed61bfc..00000000 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go +++ /dev/null @@ -1,325 +0,0 @@ -package filexfer - -// Attributes related flags. -const ( - AttrSize = 1 << iota // SSH_FILEXFER_ATTR_SIZE - AttrUIDGID // SSH_FILEXFER_ATTR_UIDGID - AttrPermissions // SSH_FILEXFER_ATTR_PERMISSIONS - AttrACModTime // SSH_FILEXFER_ACMODTIME - - AttrExtended = 1 << 31 // SSH_FILEXFER_ATTR_EXTENDED -) - -// Attributes defines the file attributes type defined in draft-ietf-secsh-filexfer-02 -// -// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 -type Attributes struct { - Flags uint32 - - // AttrSize - Size uint64 - - // AttrUIDGID - UID uint32 - GID uint32 - - // AttrPermissions - Permissions FileMode - - // AttrACmodTime - ATime uint32 - MTime uint32 - - // AttrExtended - ExtendedAttributes []ExtendedAttribute -} - -// GetSize returns the Size field and a bool that is true if and only if the value is valid/defined. 
-func (a *Attributes) GetSize() (size uint64, ok bool) { - return a.Size, a.Flags&AttrSize != 0 -} - -// SetSize is a convenience function that sets the Size field, -// and marks the field as valid/defined in Flags. -func (a *Attributes) SetSize(size uint64) { - a.Flags |= AttrSize - a.Size = size -} - -// GetUIDGID returns the UID and GID fields and a bool that is true if and only if the values are valid/defined. -func (a *Attributes) GetUIDGID() (uid, gid uint32, ok bool) { - return a.UID, a.GID, a.Flags&AttrUIDGID != 0 -} - -// SetUIDGID is a convenience function that sets the UID and GID fields, -// and marks the fields as valid/defined in Flags. -func (a *Attributes) SetUIDGID(uid, gid uint32) { - a.Flags |= AttrUIDGID - a.UID = uid - a.GID = gid -} - -// GetPermissions returns the Permissions field and a bool that is true if and only if the value is valid/defined. -func (a *Attributes) GetPermissions() (perms FileMode, ok bool) { - return a.Permissions, a.Flags&AttrPermissions != 0 -} - -// SetPermissions is a convenience function that sets the Permissions field, -// and marks the field as valid/defined in Flags. -func (a *Attributes) SetPermissions(perms FileMode) { - a.Flags |= AttrPermissions - a.Permissions = perms -} - -// GetACModTime returns the ATime and MTime fields and a bool that is true if and only if the values are valid/defined. -func (a *Attributes) GetACModTime() (atime, mtime uint32, ok bool) { - return a.ATime, a.MTime, a.Flags&AttrACModTime != 0 -} - -// SetACModTime is a convenience function that sets the ATime and MTime fields, -// and marks the fields as valid/defined in Flags. -func (a *Attributes) SetACModTime(atime, mtime uint32) { - a.Flags |= AttrACModTime - a.ATime = atime - a.MTime = mtime -} - -// Len returns the number of bytes a would marshal into. 
-func (a *Attributes) Len() int { - length := 4 - - if a.Flags&AttrSize != 0 { - length += 8 - } - - if a.Flags&AttrUIDGID != 0 { - length += 4 + 4 - } - - if a.Flags&AttrPermissions != 0 { - length += 4 - } - - if a.Flags&AttrACModTime != 0 { - length += 4 + 4 - } - - if a.Flags&AttrExtended != 0 { - length += 4 - - for _, ext := range a.ExtendedAttributes { - length += ext.Len() - } - } - - return length -} - -// MarshalInto marshals e onto the end of the given Buffer. -func (a *Attributes) MarshalInto(b *Buffer) { - b.AppendUint32(a.Flags) - - if a.Flags&AttrSize != 0 { - b.AppendUint64(a.Size) - } - - if a.Flags&AttrUIDGID != 0 { - b.AppendUint32(a.UID) - b.AppendUint32(a.GID) - } - - if a.Flags&AttrPermissions != 0 { - b.AppendUint32(uint32(a.Permissions)) - } - - if a.Flags&AttrACModTime != 0 { - b.AppendUint32(a.ATime) - b.AppendUint32(a.MTime) - } - - if a.Flags&AttrExtended != 0 { - b.AppendUint32(uint32(len(a.ExtendedAttributes))) - - for _, ext := range a.ExtendedAttributes { - ext.MarshalInto(b) - } - } -} - -// MarshalBinary returns a as the binary encoding of a. -func (a *Attributes) MarshalBinary() ([]byte, error) { - buf := NewBuffer(make([]byte, 0, a.Len())) - a.MarshalInto(buf) - return buf.Bytes(), nil -} - -// UnmarshalFrom unmarshals an Attributes from the given Buffer into e. -// -// NOTE: The values of fields not covered in the a.Flags are explicitly undefined. -func (a *Attributes) UnmarshalFrom(b *Buffer) (err error) { - flags, err := b.ConsumeUint32() - if err != nil { - return err - } - - return a.XXX_UnmarshalByFlags(flags, b) -} - -// XXX_UnmarshalByFlags uses the pre-existing a.Flags field to determine which fields to decode. -// DO NOT USE THIS: it is an anti-corruption function to implement existing internal usage in pkg/sftp. -// This function is not a part of any compatibility promise. -func (a *Attributes) XXX_UnmarshalByFlags(flags uint32, b *Buffer) (err error) { - a.Flags = flags - - // Short-circuit dummy attributes. 
- if a.Flags == 0 { - return nil - } - - if a.Flags&AttrSize != 0 { - if a.Size, err = b.ConsumeUint64(); err != nil { - return err - } - } - - if a.Flags&AttrUIDGID != 0 { - if a.UID, err = b.ConsumeUint32(); err != nil { - return err - } - - if a.GID, err = b.ConsumeUint32(); err != nil { - return err - } - } - - if a.Flags&AttrPermissions != 0 { - m, err := b.ConsumeUint32() - if err != nil { - return err - } - - a.Permissions = FileMode(m) - } - - if a.Flags&AttrACModTime != 0 { - if a.ATime, err = b.ConsumeUint32(); err != nil { - return err - } - - if a.MTime, err = b.ConsumeUint32(); err != nil { - return err - } - } - - if a.Flags&AttrExtended != 0 { - count, err := b.ConsumeUint32() - if err != nil { - return err - } - - a.ExtendedAttributes = make([]ExtendedAttribute, count) - for i := range a.ExtendedAttributes { - a.ExtendedAttributes[i].UnmarshalFrom(b) - } - } - - return nil -} - -// UnmarshalBinary decodes the binary encoding of Attributes into e. -func (a *Attributes) UnmarshalBinary(data []byte) error { - return a.UnmarshalFrom(NewBuffer(data)) -} - -// ExtendedAttribute defines the extended file attribute type defined in draft-ietf-secsh-filexfer-02 -// -// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 -type ExtendedAttribute struct { - Type string - Data string -} - -// Len returns the number of bytes e would marshal into. -func (e *ExtendedAttribute) Len() int { - return 4 + len(e.Type) + 4 + len(e.Data) -} - -// MarshalInto marshals e onto the end of the given Buffer. -func (e *ExtendedAttribute) MarshalInto(b *Buffer) { - b.AppendString(e.Type) - b.AppendString(e.Data) -} - -// MarshalBinary returns e as the binary encoding of e. -func (e *ExtendedAttribute) MarshalBinary() ([]byte, error) { - buf := NewBuffer(make([]byte, 0, e.Len())) - e.MarshalInto(buf) - return buf.Bytes(), nil -} - -// UnmarshalFrom unmarshals an ExtendedAattribute from the given Buffer into e. 
-func (e *ExtendedAttribute) UnmarshalFrom(b *Buffer) (err error) { - if e.Type, err = b.ConsumeString(); err != nil { - return err - } - - if e.Data, err = b.ConsumeString(); err != nil { - return err - } - - return nil -} - -// UnmarshalBinary decodes the binary encoding of ExtendedAttribute into e. -func (e *ExtendedAttribute) UnmarshalBinary(data []byte) error { - return e.UnmarshalFrom(NewBuffer(data)) -} - -// NameEntry implements the SSH_FXP_NAME repeated data type from draft-ietf-secsh-filexfer-02 -// -// This type is incompatible with versions 4 or higher. -type NameEntry struct { - Filename string - Longname string - Attrs Attributes -} - -// Len returns the number of bytes e would marshal into. -func (e *NameEntry) Len() int { - return 4 + len(e.Filename) + 4 + len(e.Longname) + e.Attrs.Len() -} - -// MarshalInto marshals e onto the end of the given Buffer. -func (e *NameEntry) MarshalInto(b *Buffer) { - b.AppendString(e.Filename) - b.AppendString(e.Longname) - - e.Attrs.MarshalInto(b) -} - -// MarshalBinary returns e as the binary encoding of e. -func (e *NameEntry) MarshalBinary() ([]byte, error) { - buf := NewBuffer(make([]byte, 0, e.Len())) - e.MarshalInto(buf) - return buf.Bytes(), nil -} - -// UnmarshalFrom unmarshals an NameEntry from the given Buffer into e. -// -// NOTE: The values of fields not covered in the a.Flags are explicitly undefined. -func (e *NameEntry) UnmarshalFrom(b *Buffer) (err error) { - if e.Filename, err = b.ConsumeString(); err != nil { - return err - } - - if e.Longname, err = b.ConsumeString(); err != nil { - return err - } - - return e.Attrs.UnmarshalFrom(b) -} - -// UnmarshalBinary decodes the binary encoding of NameEntry into e. 
-func (e *NameEntry) UnmarshalBinary(data []byte) error { - return e.UnmarshalFrom(NewBuffer(data)) -} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go deleted file mode 100644 index a6086036..00000000 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go +++ /dev/null @@ -1,293 +0,0 @@ -package filexfer - -import ( - "encoding/binary" - "errors" -) - -// Various encoding errors. -var ( - ErrShortPacket = errors.New("packet too short") - ErrLongPacket = errors.New("packet too long") -) - -// Buffer wraps up the various encoding details of the SSH format. -// -// Data types are encoded as per section 4 from https://tools.ietf.org/html/draft-ietf-secsh-architecture-09#page-8 -type Buffer struct { - b []byte - off int -} - -// NewBuffer creates and initializes a new buffer using buf as its initial contents. -// The new buffer takes ownership of buf, and the caller should not use buf after this call. -// -// In most cases, new(Buffer) (or just declaring a Buffer variable) is sufficient to initialize a Buffer. -func NewBuffer(buf []byte) *Buffer { - return &Buffer{ - b: buf, - } -} - -// NewMarshalBuffer creates a new Buffer ready to start marshaling a Packet into. -// It preallocates enough space for uint32(length), uint8(type), uint32(request-id) and size more bytes. -func NewMarshalBuffer(size int) *Buffer { - return NewBuffer(make([]byte, 4+1+4+size)) -} - -// Bytes returns a slice of length b.Len() holding the unconsumed bytes in the Buffer. -// The slice is valid for use only until the next buffer modification -// (that is, only until the next call to an Append or Consume method). -func (b *Buffer) Bytes() []byte { - return b.b[b.off:] -} - -// Len returns the number of unconsumed bytes in the buffer. 
-func (b *Buffer) Len() int { return len(b.b) - b.off } - -// Cap returns the capacity of the buffer’s underlying byte slice, -// that is, the total space allocated for the buffer’s data. -func (b *Buffer) Cap() int { return cap(b.b) } - -// Reset resets the buffer to be empty, but it retains the underlying storage for use by future Appends. -func (b *Buffer) Reset() { - b.b = b.b[:0] - b.off = 0 -} - -// StartPacket resets and initializes the buffer to be ready to start marshaling a packet into. -// It truncates the buffer, reserves space for uint32(length), then appends the given packetType and requestID. -func (b *Buffer) StartPacket(packetType PacketType, requestID uint32) { - b.b, b.off = append(b.b[:0], make([]byte, 4)...), 0 - - b.AppendUint8(uint8(packetType)) - b.AppendUint32(requestID) -} - -// Packet finalizes the packet started from StartPacket. -// It is expected that this will end the ownership of the underlying byte-slice, -// and so the returned byte-slices may be reused the same as any other byte-slice, -// the caller should not use this buffer after this call. -// -// It writes the packet body length into the first four bytes of the buffer in network byte order (big endian). -// The packet body length is the length of this buffer less the 4-byte length itself, plus the length of payload. -// -// It is assumed that no Consume methods have been called on this buffer, -// and so it returns the whole underlying slice. -func (b *Buffer) Packet(payload []byte) (header, payloadPassThru []byte, err error) { - b.PutLength(len(b.b) - 4 + len(payload)) - - return b.b, payload, nil -} - -// ConsumeUint8 consumes a single byte from the buffer. -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeUint8() (uint8, error) { - if b.Len() < 1 { - return 0, ErrShortPacket - } - - var v uint8 - v, b.off = b.b[b.off], b.off+1 - return v, nil -} - -// AppendUint8 appends a single byte into the buffer. 
-func (b *Buffer) AppendUint8(v uint8) { - b.b = append(b.b, v) -} - -// ConsumeBool consumes a single byte from the buffer, and returns true if that byte is non-zero. -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeBool() (bool, error) { - v, err := b.ConsumeUint8() - if err != nil { - return false, err - } - - return v != 0, nil -} - -// AppendBool appends a single bool into the buffer. -// It encodes it as a single byte, with false as 0, and true as 1. -func (b *Buffer) AppendBool(v bool) { - if v { - b.AppendUint8(1) - } else { - b.AppendUint8(0) - } -} - -// ConsumeUint16 consumes a single uint16 from the buffer, in network byte order (big-endian). -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeUint16() (uint16, error) { - if b.Len() < 2 { - return 0, ErrShortPacket - } - - v := binary.BigEndian.Uint16(b.b[b.off:]) - b.off += 2 - return v, nil -} - -// AppendUint16 appends single uint16 into the buffer, in network byte order (big-endian). -func (b *Buffer) AppendUint16(v uint16) { - b.b = append(b.b, - byte(v>>8), - byte(v>>0), - ) -} - -// unmarshalUint32 is used internally to read the packet length. -// It is unsafe, and so not exported. -// Even within this package, its use should be avoided. -func unmarshalUint32(b []byte) uint32 { - return binary.BigEndian.Uint32(b[:4]) -} - -// ConsumeUint32 consumes a single uint32 from the buffer, in network byte order (big-endian). -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeUint32() (uint32, error) { - if b.Len() < 4 { - return 0, ErrShortPacket - } - - v := binary.BigEndian.Uint32(b.b[b.off:]) - b.off += 4 - return v, nil -} - -// AppendUint32 appends a single uint32 into the buffer, in network byte order (big-endian). 
-func (b *Buffer) AppendUint32(v uint32) { - b.b = append(b.b, - byte(v>>24), - byte(v>>16), - byte(v>>8), - byte(v>>0), - ) -} - -// ConsumeUint64 consumes a single uint64 from the buffer, in network byte order (big-endian). -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeUint64() (uint64, error) { - if b.Len() < 8 { - return 0, ErrShortPacket - } - - v := binary.BigEndian.Uint64(b.b[b.off:]) - b.off += 8 - return v, nil -} - -// AppendUint64 appends a single uint64 into the buffer, in network byte order (big-endian). -func (b *Buffer) AppendUint64(v uint64) { - b.b = append(b.b, - byte(v>>56), - byte(v>>48), - byte(v>>40), - byte(v>>32), - byte(v>>24), - byte(v>>16), - byte(v>>8), - byte(v>>0), - ) -} - -// ConsumeInt64 consumes a single int64 from the buffer, in network byte order (big-endian) with two’s complement. -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeInt64() (int64, error) { - u, err := b.ConsumeUint64() - if err != nil { - return 0, err - } - - return int64(u), err -} - -// AppendInt64 appends a single int64 into the buffer, in network byte order (big-endian) with two’s complement. -func (b *Buffer) AppendInt64(v int64) { - b.AppendUint64(uint64(v)) -} - -// ConsumeByteSlice consumes a single string of raw binary data from the buffer. -// A string is a uint32 length, followed by that number of raw bytes. -// If the buffer does not have enough data, or defines a length larger than available, it will return ErrShortPacket. -// -// The returned slice aliases the buffer contents, and is valid only as long as the buffer is not reused -// (that is, only until the next call to Reset, PutLength, StartPacket, or UnmarshalBinary). -// -// In no case will any Consume calls return overlapping slice aliases, -// and Append calls are guaranteed to not disturb this slice alias. 
-func (b *Buffer) ConsumeByteSlice() ([]byte, error) { - length, err := b.ConsumeUint32() - if err != nil { - return nil, err - } - - if b.Len() < int(length) { - return nil, ErrShortPacket - } - - v := b.b[b.off:] - if len(v) > int(length) { - v = v[:length:length] - } - b.off += int(length) - return v, nil -} - -// AppendByteSlice appends a single string of raw binary data into the buffer. -// A string is a uint32 length, followed by that number of raw bytes. -func (b *Buffer) AppendByteSlice(v []byte) { - b.AppendUint32(uint32(len(v))) - b.b = append(b.b, v...) -} - -// ConsumeString consumes a single string of binary data from the buffer. -// A string is a uint32 length, followed by that number of raw bytes. -// If the buffer does not have enough data, or defines a length larger than available, it will return ErrShortPacket. -// -// NOTE: Go implicitly assumes that strings contain UTF-8 encoded data. -// All caveats on using arbitrary binary data in Go strings applies. -func (b *Buffer) ConsumeString() (string, error) { - v, err := b.ConsumeByteSlice() - if err != nil { - return "", err - } - - return string(v), nil -} - -// AppendString appends a single string of binary data into the buffer. -// A string is a uint32 length, followed by that number of raw bytes. -func (b *Buffer) AppendString(v string) { - b.AppendByteSlice([]byte(v)) -} - -// PutLength writes the given size into the first four bytes of the buffer in network byte order (big endian). -func (b *Buffer) PutLength(size int) { - if len(b.b) < 4 { - b.b = append(b.b, make([]byte, 4-len(b.b))...) - } - - binary.BigEndian.PutUint32(b.b, uint32(size)) -} - -// MarshalBinary returns a clone of the full internal buffer. -func (b *Buffer) MarshalBinary() ([]byte, error) { - clone := make([]byte, len(b.b)) - n := copy(clone, b.b) - return clone[:n], nil -} - -// UnmarshalBinary sets the internal buffer of b to be a clone of data, and zeros the internal offset. 
-func (b *Buffer) UnmarshalBinary(data []byte) error { - if grow := len(data) - len(b.b); grow > 0 { - b.b = append(b.b, make([]byte, grow)...) - } - - n := copy(b.b, data) - b.b = b.b[:n] - b.off = 0 - return nil -} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go deleted file mode 100644 index 6b7b2cef..00000000 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go +++ /dev/null @@ -1,142 +0,0 @@ -package filexfer - -import ( - "encoding" - "sync" -) - -// ExtendedData aliases the untyped interface composition of encoding.BinaryMarshaler and encoding.BinaryUnmarshaler. -type ExtendedData = interface { - encoding.BinaryMarshaler - encoding.BinaryUnmarshaler -} - -// ExtendedDataConstructor defines a function that returns a new(ArbitraryExtendedPacket). -type ExtendedDataConstructor func() ExtendedData - -var extendedPacketTypes = struct { - mu sync.RWMutex - constructors map[string]ExtendedDataConstructor -}{ - constructors: make(map[string]ExtendedDataConstructor), -} - -// RegisterExtendedPacketType defines a specific ExtendedDataConstructor for the given extension string. -func RegisterExtendedPacketType(extension string, constructor ExtendedDataConstructor) { - extendedPacketTypes.mu.Lock() - defer extendedPacketTypes.mu.Unlock() - - if _, exist := extendedPacketTypes.constructors[extension]; exist { - panic("encoding/ssh/filexfer: multiple registration of extended packet type " + extension) - } - - extendedPacketTypes.constructors[extension] = constructor -} - -func newExtendedPacket(extension string) ExtendedData { - extendedPacketTypes.mu.RLock() - defer extendedPacketTypes.mu.RUnlock() - - if f := extendedPacketTypes.constructors[extension]; f != nil { - return f() - } - - return new(Buffer) -} - -// ExtendedPacket defines the SSH_FXP_CLOSE packet. 
-type ExtendedPacket struct { - ExtendedRequest string - - Data ExtendedData -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *ExtendedPacket) Type() PacketType { - return PacketTypeExtended -} - -// MarshalPacket returns p as a two-part binary encoding of p. -// -// The Data is marshaled into binary, and returned as the payload. -func (p *ExtendedPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.ExtendedRequest) // string(extended-request) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeExtended, reqid) - buf.AppendString(p.ExtendedRequest) - - if p.Data != nil { - payload, err = p.Data.MarshalBinary() - if err != nil { - return nil, nil, err - } - } - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -// -// If p.Data is nil, and the extension has been registered, a new type will be made from the registration. -// If the extension has not been registered, then a new Buffer will be allocated. -// Then the request-specific-data will be unmarshaled from the rest of the buffer. -func (p *ExtendedPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.ExtendedRequest, err = buf.ConsumeString(); err != nil { - return err - } - - if p.Data == nil { - p.Data = newExtendedPacket(p.ExtendedRequest) - } - - return p.Data.UnmarshalBinary(buf.Bytes()) -} - -// ExtendedReplyPacket defines the SSH_FXP_CLOSE packet. -type ExtendedReplyPacket struct { - Data ExtendedData -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *ExtendedReplyPacket) Type() PacketType { - return PacketTypeExtendedReply -} - -// MarshalPacket returns p as a two-part binary encoding of p. -// -// The Data is marshaled into binary, and returned as the payload. 
-func (p *ExtendedReplyPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - buf = NewMarshalBuffer(0) - } - - buf.StartPacket(PacketTypeExtendedReply, reqid) - - if p.Data != nil { - payload, err = p.Data.MarshalBinary() - if err != nil { - return nil, nil, err - } - } - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -// -// If p.Data is nil, and there is request-specific-data, -// then the request-specific-data will be wrapped in a Buffer and assigned to p.Data. -func (p *ExtendedReplyPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Data == nil { - p.Data = new(Buffer) - } - - return p.Data.UnmarshalBinary(buf.Bytes()) -} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go deleted file mode 100644 index 11c0b99c..00000000 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go +++ /dev/null @@ -1,46 +0,0 @@ -package filexfer - -// ExtensionPair defines the extension-pair type defined in draft-ietf-secsh-filexfer-13. -// This type is backwards-compatible with how draft-ietf-secsh-filexfer-02 defines extensions. -// -// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-4.2 -type ExtensionPair struct { - Name string - Data string -} - -// Len returns the number of bytes e would marshal into. -func (e *ExtensionPair) Len() int { - return 4 + len(e.Name) + 4 + len(e.Data) -} - -// MarshalInto marshals e onto the end of the given Buffer. -func (e *ExtensionPair) MarshalInto(buf *Buffer) { - buf.AppendString(e.Name) - buf.AppendString(e.Data) -} - -// MarshalBinary returns e as the binary encoding of e. 
-func (e *ExtensionPair) MarshalBinary() ([]byte, error) { - buf := NewBuffer(make([]byte, 0, e.Len())) - e.MarshalInto(buf) - return buf.Bytes(), nil -} - -// UnmarshalFrom unmarshals an ExtensionPair from the given Buffer into e. -func (e *ExtensionPair) UnmarshalFrom(buf *Buffer) (err error) { - if e.Name, err = buf.ConsumeString(); err != nil { - return err - } - - if e.Data, err = buf.ConsumeString(); err != nil { - return err - } - - return nil -} - -// UnmarshalBinary decodes the binary encoding of ExtensionPair into e. -func (e *ExtensionPair) UnmarshalBinary(data []byte) error { - return e.UnmarshalFrom(NewBuffer(data)) -} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go deleted file mode 100644 index 1e5abf74..00000000 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go +++ /dev/null @@ -1,54 +0,0 @@ -// Package filexfer implements the wire encoding for secsh-filexfer as described in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 -package filexfer - -// PacketMarshaller narrowly defines packets that will only be transmitted. -// -// ExtendedPacket types will often only implement this interface, -// since decoding the whole packet body of an ExtendedPacket can only be done dependent on the ExtendedRequest field. -type PacketMarshaller interface { - // MarshalPacket is the primary intended way to encode a packet. - // The request-id for the packet is set from reqid. - // - // An optional buffer may be given in b. - // If the buffer has a minimum capacity, it shall be truncated and used to marshal the header into. - // The minimum capacity for the packet must be a constant expression, and should be at least 9. - // - // It shall return the main body of the encoded packet in header, - // and may optionally return an additional payload to be written immediately after the header. 
- // - // It shall encode in the first 4-bytes of the header the proper length of the rest of the header+payload. - MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) -} - -// Packet defines the behavior of a full generic SFTP packet. -// -// InitPacket, and VersionPacket are not generic SFTP packets, and instead implement (Un)MarshalBinary. -// -// ExtendedPacket types should not iplement this interface, -// since decoding the whole packet body of an ExtendedPacket can only be done dependent on the ExtendedRequest field. -type Packet interface { - PacketMarshaller - - // Type returns the SSH_FXP_xy value associated with the specific packet. - Type() PacketType - - // UnmarshalPacketBody decodes a packet body from the given Buffer. - // It is assumed that the common header values of the length, type and request-id have already been consumed. - // - // Implementations should not alias the given Buffer, - // instead they can consider prepopulating an internal buffer as a hint, - // and copying into that buffer if it has sufficient length. - UnmarshalPacketBody(buf *Buffer) error -} - -// ComposePacket converts returns from MarshalPacket into an equivalent call to MarshalBinary. -func ComposePacket(header, payload []byte, err error) ([]byte, error) { - return append(header, payload...), err -} - -// Default length values, -// Defined in draft-ietf-secsh-filexfer-02 section 3. -const ( - DefaultMaxPacketLength = 34000 - DefaultMaxDataLength = 32768 -) diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go deleted file mode 100644 index 48f86986..00000000 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go +++ /dev/null @@ -1,147 +0,0 @@ -package filexfer - -import ( - "fmt" -) - -// Status defines the SFTP error codes used in SSH_FXP_STATUS response packets. -type Status uint32 - -// Defines the various SSH_FX_* values. 
-const ( - // see draft-ietf-secsh-filexfer-02 - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-7 - StatusOK = Status(iota) - StatusEOF - StatusNoSuchFile - StatusPermissionDenied - StatusFailure - StatusBadMessage - StatusNoConnection - StatusConnectionLost - StatusOPUnsupported - - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-03#section-7 - StatusV4InvalidHandle - StatusV4NoSuchPath - StatusV4FileAlreadyExists - StatusV4WriteProtect - - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-04#section-7 - StatusV4NoMedia - - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-05#section-7 - StatusV5NoSpaceOnFilesystem - StatusV5QuotaExceeded - StatusV5UnknownPrincipal - StatusV5LockConflict - - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-06#section-8 - StatusV6DirNotEmpty - StatusV6NotADirectory - StatusV6InvalidFilename - StatusV6LinkLoop - - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-07#section-8 - StatusV6CannotDelete - StatusV6InvalidParameter - StatusV6FileIsADirectory - StatusV6ByteRangeLockConflict - StatusV6ByteRangeLockRefused - StatusV6DeletePending - - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-08#section-8.1 - StatusV6FileCorrupt - - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-10#section-9.1 - StatusV6OwnerInvalid - StatusV6GroupInvalid - - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1 - StatusV6NoMatchingByteRangeLock -) - -func (s Status) Error() string { - return s.String() -} - -// Is returns true if the target is the same Status code, -// or target is a StatusPacket with the same Status code. 
-func (s Status) Is(target error) bool { - if target, ok := target.(*StatusPacket); ok { - return target.StatusCode == s - } - - return s == target -} - -func (s Status) String() string { - switch s { - case StatusOK: - return "SSH_FX_OK" - case StatusEOF: - return "SSH_FX_EOF" - case StatusNoSuchFile: - return "SSH_FX_NO_SUCH_FILE" - case StatusPermissionDenied: - return "SSH_FX_PERMISSION_DENIED" - case StatusFailure: - return "SSH_FX_FAILURE" - case StatusBadMessage: - return "SSH_FX_BAD_MESSAGE" - case StatusNoConnection: - return "SSH_FX_NO_CONNECTION" - case StatusConnectionLost: - return "SSH_FX_CONNECTION_LOST" - case StatusOPUnsupported: - return "SSH_FX_OP_UNSUPPORTED" - case StatusV4InvalidHandle: - return "SSH_FX_INVALID_HANDLE" - case StatusV4NoSuchPath: - return "SSH_FX_NO_SUCH_PATH" - case StatusV4FileAlreadyExists: - return "SSH_FX_FILE_ALREADY_EXISTS" - case StatusV4WriteProtect: - return "SSH_FX_WRITE_PROTECT" - case StatusV4NoMedia: - return "SSH_FX_NO_MEDIA" - case StatusV5NoSpaceOnFilesystem: - return "SSH_FX_NO_SPACE_ON_FILESYSTEM" - case StatusV5QuotaExceeded: - return "SSH_FX_QUOTA_EXCEEDED" - case StatusV5UnknownPrincipal: - return "SSH_FX_UNKNOWN_PRINCIPAL" - case StatusV5LockConflict: - return "SSH_FX_LOCK_CONFLICT" - case StatusV6DirNotEmpty: - return "SSH_FX_DIR_NOT_EMPTY" - case StatusV6NotADirectory: - return "SSH_FX_NOT_A_DIRECTORY" - case StatusV6InvalidFilename: - return "SSH_FX_INVALID_FILENAME" - case StatusV6LinkLoop: - return "SSH_FX_LINK_LOOP" - case StatusV6CannotDelete: - return "SSH_FX_CANNOT_DELETE" - case StatusV6InvalidParameter: - return "SSH_FX_INVALID_PARAMETER" - case StatusV6FileIsADirectory: - return "SSH_FX_FILE_IS_A_DIRECTORY" - case StatusV6ByteRangeLockConflict: - return "SSH_FX_BYTE_RANGE_LOCK_CONFLICT" - case StatusV6ByteRangeLockRefused: - return "SSH_FX_BYTE_RANGE_LOCK_REFUSED" - case StatusV6DeletePending: - return "SSH_FX_DELETE_PENDING" - case StatusV6FileCorrupt: - return "SSH_FX_FILE_CORRUPT" - case 
StatusV6OwnerInvalid: - return "SSH_FX_OWNER_INVALID" - case StatusV6GroupInvalid: - return "SSH_FX_GROUP_INVALID" - case StatusV6NoMatchingByteRangeLock: - return "SSH_FX_NO_MATCHING_BYTE_RANGE_LOCK" - default: - return fmt.Sprintf("SSH_FX_UNKNOWN(%d)", s) - } -} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go deleted file mode 100644 index 15caf6d2..00000000 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go +++ /dev/null @@ -1,124 +0,0 @@ -package filexfer - -import ( - "fmt" -) - -// PacketType defines the various SFTP packet types. -type PacketType uint8 - -// Request packet types. -const ( - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 - PacketTypeInit = PacketType(iota + 1) - PacketTypeVersion - PacketTypeOpen - PacketTypeClose - PacketTypeRead - PacketTypeWrite - PacketTypeLStat - PacketTypeFStat - PacketTypeSetstat - PacketTypeFSetstat - PacketTypeOpenDir - PacketTypeReadDir - PacketTypeRemove - PacketTypeMkdir - PacketTypeRmdir - PacketTypeRealPath - PacketTypeStat - PacketTypeRename - PacketTypeReadLink - PacketTypeSymlink - - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-07#section-3.3 - PacketTypeV6Link - - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-08#section-3.3 - PacketTypeV6Block - PacketTypeV6Unblock -) - -// Response packet types. -const ( - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 - PacketTypeStatus = PacketType(iota + 101) - PacketTypeHandle - PacketTypeData - PacketTypeName - PacketTypeAttrs -) - -// Extended packet types. 
-const ( - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 - PacketTypeExtended = PacketType(iota + 200) - PacketTypeExtendedReply -) - -func (f PacketType) String() string { - switch f { - case PacketTypeInit: - return "SSH_FXP_INIT" - case PacketTypeVersion: - return "SSH_FXP_VERSION" - case PacketTypeOpen: - return "SSH_FXP_OPEN" - case PacketTypeClose: - return "SSH_FXP_CLOSE" - case PacketTypeRead: - return "SSH_FXP_READ" - case PacketTypeWrite: - return "SSH_FXP_WRITE" - case PacketTypeLStat: - return "SSH_FXP_LSTAT" - case PacketTypeFStat: - return "SSH_FXP_FSTAT" - case PacketTypeSetstat: - return "SSH_FXP_SETSTAT" - case PacketTypeFSetstat: - return "SSH_FXP_FSETSTAT" - case PacketTypeOpenDir: - return "SSH_FXP_OPENDIR" - case PacketTypeReadDir: - return "SSH_FXP_READDIR" - case PacketTypeRemove: - return "SSH_FXP_REMOVE" - case PacketTypeMkdir: - return "SSH_FXP_MKDIR" - case PacketTypeRmdir: - return "SSH_FXP_RMDIR" - case PacketTypeRealPath: - return "SSH_FXP_REALPATH" - case PacketTypeStat: - return "SSH_FXP_STAT" - case PacketTypeRename: - return "SSH_FXP_RENAME" - case PacketTypeReadLink: - return "SSH_FXP_READLINK" - case PacketTypeSymlink: - return "SSH_FXP_SYMLINK" - case PacketTypeV6Link: - return "SSH_FXP_LINK" - case PacketTypeV6Block: - return "SSH_FXP_BLOCK" - case PacketTypeV6Unblock: - return "SSH_FXP_UNBLOCK" - case PacketTypeStatus: - return "SSH_FXP_STATUS" - case PacketTypeHandle: - return "SSH_FXP_HANDLE" - case PacketTypeData: - return "SSH_FXP_DATA" - case PacketTypeName: - return "SSH_FXP_NAME" - case PacketTypeAttrs: - return "SSH_FXP_ATTRS" - case PacketTypeExtended: - return "SSH_FXP_EXTENDED" - case PacketTypeExtendedReply: - return "SSH_FXP_EXTENDED_REPLY" - default: - return fmt.Sprintf("SSH_FXP_UNKNOWN(%d)", f) - } -} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go deleted file mode 
100644 index a1427712..00000000 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go +++ /dev/null @@ -1,249 +0,0 @@ -package filexfer - -// ClosePacket defines the SSH_FXP_CLOSE packet. -type ClosePacket struct { - Handle string -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *ClosePacket) Type() PacketType { - return PacketTypeClose -} - -// MarshalPacket returns p as a two-part binary encoding of p. -func (p *ClosePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.Handle) // string(handle) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeClose, reqid) - buf.AppendString(p.Handle) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *ClosePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err - } - - return nil -} - -// ReadPacket defines the SSH_FXP_READ packet. -type ReadPacket struct { - Handle string - Offset uint64 - Len uint32 -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *ReadPacket) Type() PacketType { - return PacketTypeRead -} - -// MarshalPacket returns p as a two-part binary encoding of p. -func (p *ReadPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - // string(handle) + uint64(offset) + uint32(len) - size := 4 + len(p.Handle) + 8 + 4 - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeRead, reqid) - buf.AppendString(p.Handle) - buf.AppendUint64(p.Offset) - buf.AppendUint32(p.Len) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. 
-// It is assumed that the uint32(request-id) has already been consumed. -func (p *ReadPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err - } - - if p.Offset, err = buf.ConsumeUint64(); err != nil { - return err - } - - if p.Len, err = buf.ConsumeUint32(); err != nil { - return err - } - - return nil -} - -// WritePacket defines the SSH_FXP_WRITE packet. -type WritePacket struct { - Handle string - Offset uint64 - Data []byte -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *WritePacket) Type() PacketType { - return PacketTypeWrite -} - -// MarshalPacket returns p as a two-part binary encoding of p. -func (p *WritePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - // string(handle) + uint64(offset) + uint32(len(data)); data content in payload - size := 4 + len(p.Handle) + 8 + 4 - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeWrite, reqid) - buf.AppendString(p.Handle) - buf.AppendUint64(p.Offset) - buf.AppendUint32(uint32(len(p.Data))) - - return buf.Packet(p.Data) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -// -// If p.Data is already populated, and of sufficient length to hold the data, -// then this will copy the data into that byte slice. -// -// If p.Data has a length insufficient to hold the data, -// then this will make a new slice of sufficient length, and copy the data into that. -// -// This means this _does not_ alias any of the data buffer that is passed in. 
-func (p *WritePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err - } - - if p.Offset, err = buf.ConsumeUint64(); err != nil { - return err - } - - data, err := buf.ConsumeByteSlice() - if err != nil { - return err - } - - if len(p.Data) < len(data) { - p.Data = make([]byte, len(data)) - } - - n := copy(p.Data, data) - p.Data = p.Data[:n] - return nil -} - -// FStatPacket defines the SSH_FXP_FSTAT packet. -type FStatPacket struct { - Handle string -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *FStatPacket) Type() PacketType { - return PacketTypeFStat -} - -// MarshalPacket returns p as a two-part binary encoding of p. -func (p *FStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.Handle) // string(handle) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeFStat, reqid) - buf.AppendString(p.Handle) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *FStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err - } - - return nil -} - -// FSetstatPacket defines the SSH_FXP_FSETSTAT packet. -type FSetstatPacket struct { - Handle string - Attrs Attributes -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *FSetstatPacket) Type() PacketType { - return PacketTypeFSetstat -} - -// MarshalPacket returns p as a two-part binary encoding of p. 
-func (p *FSetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.Handle) + p.Attrs.Len() // string(handle) + ATTRS(attrs) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeFSetstat, reqid) - buf.AppendString(p.Handle) - - p.Attrs.MarshalInto(buf) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *FSetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err - } - - return p.Attrs.UnmarshalFrom(buf) -} - -// ReadDirPacket defines the SSH_FXP_READDIR packet. -type ReadDirPacket struct { - Handle string -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *ReadDirPacket) Type() PacketType { - return PacketTypeReadDir -} - -// MarshalPacket returns p as a two-part binary encoding of p. -func (p *ReadDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.Handle) // string(handle) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeReadDir, reqid) - buf.AppendString(p.Handle) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. 
-func (p *ReadDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go deleted file mode 100644 index b0bc6f50..00000000 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go +++ /dev/null @@ -1,99 +0,0 @@ -package filexfer - -// InitPacket defines the SSH_FXP_INIT packet. -type InitPacket struct { - Version uint32 - Extensions []*ExtensionPair -} - -// MarshalBinary returns p as the binary encoding of p. -func (p *InitPacket) MarshalBinary() ([]byte, error) { - size := 1 + 4 // byte(type) + uint32(version) - - for _, ext := range p.Extensions { - size += ext.Len() - } - - b := NewBuffer(make([]byte, 4, 4+size)) - b.AppendUint8(uint8(PacketTypeInit)) - b.AppendUint32(p.Version) - - for _, ext := range p.Extensions { - ext.MarshalInto(b) - } - - b.PutLength(size) - - return b.Bytes(), nil -} - -// UnmarshalBinary unmarshals a full raw packet out of the given data. -// It is assumed that the uint32(length) has already been consumed to receive the data. -// It is also assumed that the uint8(type) has already been consumed to which packet to unmarshal into. -func (p *InitPacket) UnmarshalBinary(data []byte) (err error) { - buf := NewBuffer(data) - - if p.Version, err = buf.ConsumeUint32(); err != nil { - return err - } - - for buf.Len() > 0 { - var ext ExtensionPair - if err := ext.UnmarshalFrom(buf); err != nil { - return err - } - - p.Extensions = append(p.Extensions, &ext) - } - - return nil -} - -// VersionPacket defines the SSH_FXP_VERSION packet. -type VersionPacket struct { - Version uint32 - Extensions []*ExtensionPair -} - -// MarshalBinary returns p as the binary encoding of p. 
-func (p *VersionPacket) MarshalBinary() ([]byte, error) { - size := 1 + 4 // byte(type) + uint32(version) - - for _, ext := range p.Extensions { - size += ext.Len() - } - - b := NewBuffer(make([]byte, 4, 4+size)) - b.AppendUint8(uint8(PacketTypeVersion)) - b.AppendUint32(p.Version) - - for _, ext := range p.Extensions { - ext.MarshalInto(b) - } - - b.PutLength(size) - - return b.Bytes(), nil -} - -// UnmarshalBinary unmarshals a full raw packet out of the given data. -// It is assumed that the uint32(length) has already been consumed to receive the data. -// It is also assumed that the uint8(type) has already been consumed to which packet to unmarshal into. -func (p *VersionPacket) UnmarshalBinary(data []byte) (err error) { - buf := NewBuffer(data) - - if p.Version, err = buf.ConsumeUint32(); err != nil { - return err - } - - for buf.Len() > 0 { - var ext ExtensionPair - if err := ext.UnmarshalFrom(buf); err != nil { - return err - } - - p.Extensions = append(p.Extensions, &ext) - } - - return nil -} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go deleted file mode 100644 index 13587114..00000000 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go +++ /dev/null @@ -1,89 +0,0 @@ -package filexfer - -// SSH_FXF_* flags. -const ( - FlagRead = 1 << iota // SSH_FXF_READ - FlagWrite // SSH_FXF_WRITE - FlagAppend // SSH_FXF_APPEND - FlagCreate // SSH_FXF_CREAT - FlagTruncate // SSH_FXF_TRUNC - FlagExclusive // SSH_FXF_EXCL -) - -// OpenPacket defines the SSH_FXP_OPEN packet. -type OpenPacket struct { - Filename string - PFlags uint32 - Attrs Attributes -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *OpenPacket) Type() PacketType { - return PacketTypeOpen -} - -// MarshalPacket returns p as a two-part binary encoding of p. 
-func (p *OpenPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - // string(filename) + uint32(pflags) + ATTRS(attrs) - size := 4 + len(p.Filename) + 4 + p.Attrs.Len() - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeOpen, reqid) - buf.AppendString(p.Filename) - buf.AppendUint32(p.PFlags) - - p.Attrs.MarshalInto(buf) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *OpenPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Filename, err = buf.ConsumeString(); err != nil { - return err - } - - if p.PFlags, err = buf.ConsumeUint32(); err != nil { - return err - } - - return p.Attrs.UnmarshalFrom(buf) -} - -// OpenDirPacket defines the SSH_FXP_OPENDIR packet. -type OpenDirPacket struct { - Path string -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *OpenDirPacket) Type() PacketType { - return PacketTypeOpenDir -} - -// MarshalPacket returns p as a two-part binary encoding of p. -func (p *OpenDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.Path) // string(path) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeOpenDir, reqid) - buf.AppendString(p.Path) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. 
-func (p *OpenDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go deleted file mode 100644 index 3f24e9c2..00000000 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go +++ /dev/null @@ -1,323 +0,0 @@ -package filexfer - -import ( - "errors" - "fmt" - "io" -) - -// smallBufferSize is an initial allocation minimal capacity. -const smallBufferSize = 64 - -func newPacketFromType(typ PacketType) (Packet, error) { - switch typ { - case PacketTypeOpen: - return new(OpenPacket), nil - case PacketTypeClose: - return new(ClosePacket), nil - case PacketTypeRead: - return new(ReadPacket), nil - case PacketTypeWrite: - return new(WritePacket), nil - case PacketTypeLStat: - return new(LStatPacket), nil - case PacketTypeFStat: - return new(FStatPacket), nil - case PacketTypeSetstat: - return new(SetstatPacket), nil - case PacketTypeFSetstat: - return new(FSetstatPacket), nil - case PacketTypeOpenDir: - return new(OpenDirPacket), nil - case PacketTypeReadDir: - return new(ReadDirPacket), nil - case PacketTypeRemove: - return new(RemovePacket), nil - case PacketTypeMkdir: - return new(MkdirPacket), nil - case PacketTypeRmdir: - return new(RmdirPacket), nil - case PacketTypeRealPath: - return new(RealPathPacket), nil - case PacketTypeStat: - return new(StatPacket), nil - case PacketTypeRename: - return new(RenamePacket), nil - case PacketTypeReadLink: - return new(ReadLinkPacket), nil - case PacketTypeSymlink: - return new(SymlinkPacket), nil - case PacketTypeExtended: - return new(ExtendedPacket), nil - default: - return nil, fmt.Errorf("unexpected request packet type: %v", typ) - } -} - -// RawPacket implements the general packet format from draft-ietf-secsh-filexfer-02 -// -// RawPacket is intended for use in 
clients receiving responses, -// where a response will be expected to be of a limited number of types, -// and unmarshaling unknown/unexpected response packets is unnecessary. -// -// For servers expecting to receive arbitrary request packet types, -// use RequestPacket. -// -// Defined in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 -type RawPacket struct { - PacketType PacketType - RequestID uint32 - - Data Buffer -} - -// Type returns the Type field defining the SSH_FXP_xy type for this packet. -func (p *RawPacket) Type() PacketType { - return p.PacketType -} - -// Reset clears the pointers and reference-semantic variables of RawPacket, -// releasing underlying resources, and making them and the RawPacket suitable to be reused, -// so long as no other references have been kept. -func (p *RawPacket) Reset() { - p.Data = Buffer{} -} - -// MarshalPacket returns p as a two-part binary encoding of p. -// -// The internal p.RequestID is overridden by the reqid argument. -func (p *RawPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - buf = NewMarshalBuffer(0) - } - - buf.StartPacket(p.PacketType, reqid) - - return buf.Packet(p.Data.Bytes()) -} - -// MarshalBinary returns p as the binary encoding of p. -// -// This is a convenience implementation primarily intended for tests, -// because it is inefficient with allocations. -func (p *RawPacket) MarshalBinary() ([]byte, error) { - return ComposePacket(p.MarshalPacket(p.RequestID, nil)) -} - -// UnmarshalFrom decodes a RawPacket from the given Buffer into p. -// -// The Data field will alias the passed in Buffer, -// so the buffer passed in should not be reused before RawPacket.Reset(). 
-func (p *RawPacket) UnmarshalFrom(buf *Buffer) error { - typ, err := buf.ConsumeUint8() - if err != nil { - return err - } - - p.PacketType = PacketType(typ) - - if p.RequestID, err = buf.ConsumeUint32(); err != nil { - return err - } - - p.Data = *buf - return nil -} - -// UnmarshalBinary decodes a full raw packet out of the given data. -// It is assumed that the uint32(length) has already been consumed to receive the data. -// -// This is a convenience implementation primarily intended for tests, -// because this must clone the given data byte slice, -// as Data is not allowed to alias any part of the data byte slice. -func (p *RawPacket) UnmarshalBinary(data []byte) error { - clone := make([]byte, len(data)) - n := copy(clone, data) - return p.UnmarshalFrom(NewBuffer(clone[:n])) -} - -// readPacket reads a uint32 length-prefixed binary data packet from r. -// using the given byte slice as a backing array. -// -// If the packet length read from r is bigger than maxPacketLength, -// or greater than math.MaxInt32 on a 32-bit implementation, -// then a `ErrLongPacket` error will be returned. -// -// If the given byte slice is insufficient to hold the packet, -// then it will be extended to fill the packet size. -func readPacket(r io.Reader, b []byte, maxPacketLength uint32) ([]byte, error) { - if cap(b) < 4 { - // We will need allocate our own buffer just for reading the packet length. - - // However, we don’t really want to allocate an extremely narrow buffer (4-bytes), - // and cause unnecessary allocation churn from both length reads and small packet reads, - // so we use smallBufferSize from the bytes package as a reasonable guess. - - // But if callers really do want to force narrow throw-away allocation of every packet body, - // they can do so with a buffer of capacity 4. 
- b = make([]byte, smallBufferSize) - } - - if _, err := io.ReadFull(r, b[:4]); err != nil { - return nil, err - } - - length := unmarshalUint32(b) - if int(length) < 5 { - // Must have at least uint8(type) and uint32(request-id) - - if int(length) < 0 { - // Only possible when strconv.IntSize == 32, - // the packet length is longer than math.MaxInt32, - // and thus longer than any possible slice. - return nil, ErrLongPacket - } - - return nil, ErrShortPacket - } - if length > maxPacketLength { - return nil, ErrLongPacket - } - - if int(length) > cap(b) { - // We know int(length) must be positive, because of tests above. - b = make([]byte, length) - } - - n, err := io.ReadFull(r, b[:length]) - return b[:n], err -} - -// ReadFrom provides a simple functional packet reader, -// using the given byte slice as a backing array. -// -// To protect against potential denial of service attacks, -// if the read packet length is longer than maxPacketLength, -// then no packet data will be read, and ErrLongPacket will be returned. -// (On 32-bit int architectures, all packets >= 2^31 in length -// will return ErrLongPacket regardless of maxPacketLength.) -// -// If the read packet length is longer than cap(b), -// then a throw-away slice will allocated to meet the exact packet length. -// This can be used to limit the length of reused buffers, -// while still allowing reception of occasional large packets. -// -// The Data field may alias the passed in byte slice, -// so the byte slice passed in should not be reused before RawPacket.Reset(). -func (p *RawPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) error { - b, err := readPacket(r, b, maxPacketLength) - if err != nil { - return err - } - - return p.UnmarshalFrom(NewBuffer(b)) -} - -// RequestPacket implements the general packet format from draft-ietf-secsh-filexfer-02 -// but also automatically decode/encodes valid request packets (2 < type < 100 || type == 200). 
-// -// RequestPacket is intended for use in servers receiving requests, -// where any arbitrary request may be received, and so decoding them automatically -// is useful. -// -// For clients expecting to receive specific response packet types, -// where automatic unmarshaling of the packet body does not make sense, -// use RawPacket. -// -// Defined in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 -type RequestPacket struct { - RequestID uint32 - - Request Packet -} - -// Type returns the SSH_FXP_xy value associated with the underlying packet. -func (p *RequestPacket) Type() PacketType { - return p.Request.Type() -} - -// Reset clears the pointers and reference-semantic variables in RequestPacket, -// releasing underlying resources, and making them and the RequestPacket suitable to be reused, -// so long as no other references have been kept. -func (p *RequestPacket) Reset() { - p.Request = nil -} - -// MarshalPacket returns p as a two-part binary encoding of p. -// -// The internal p.RequestID is overridden by the reqid argument. -func (p *RequestPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - if p.Request == nil { - return nil, nil, errors.New("empty request packet") - } - - return p.Request.MarshalPacket(reqid, b) -} - -// MarshalBinary returns p as the binary encoding of p. -// -// This is a convenience implementation primarily intended for tests, -// because it is inefficient with allocations. -func (p *RequestPacket) MarshalBinary() ([]byte, error) { - return ComposePacket(p.MarshalPacket(p.RequestID, nil)) -} - -// UnmarshalFrom decodes a RequestPacket from the given Buffer into p. -// -// The Request field may alias the passed in Buffer, (e.g. SSH_FXP_WRITE), -// so the buffer passed in should not be reused before RequestPacket.Reset(). 
-func (p *RequestPacket) UnmarshalFrom(buf *Buffer) error { - typ, err := buf.ConsumeUint8() - if err != nil { - return err - } - - p.Request, err = newPacketFromType(PacketType(typ)) - if err != nil { - return err - } - - if p.RequestID, err = buf.ConsumeUint32(); err != nil { - return err - } - - return p.Request.UnmarshalPacketBody(buf) -} - -// UnmarshalBinary decodes a full request packet out of the given data. -// It is assumed that the uint32(length) has already been consumed to receive the data. -// -// This is a convenience implementation primarily intended for tests, -// because this must clone the given data byte slice, -// as Request is not allowed to alias any part of the data byte slice. -func (p *RequestPacket) UnmarshalBinary(data []byte) error { - clone := make([]byte, len(data)) - n := copy(clone, data) - return p.UnmarshalFrom(NewBuffer(clone[:n])) -} - -// ReadFrom provides a simple functional packet reader, -// using the given byte slice as a backing array. -// -// To protect against potential denial of service attacks, -// if the read packet length is longer than maxPacketLength, -// then no packet data will be read, and ErrLongPacket will be returned. -// (On 32-bit int architectures, all packets >= 2^31 in length -// will return ErrLongPacket regardless of maxPacketLength.) -// -// If the read packet length is longer than cap(b), -// then a throw-away slice will allocated to meet the exact packet length. -// This can be used to limit the length of reused buffers, -// while still allowing reception of occasional large packets. -// -// The Request field may alias the passed in byte slice, -// so the byte slice passed in should not be reused before RawPacket.Reset(). 
-func (p *RequestPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) error { - b, err := readPacket(r, b, maxPacketLength) - if err != nil { - return err - } - - return p.UnmarshalFrom(NewBuffer(b)) -} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go deleted file mode 100644 index e6f692d9..00000000 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go +++ /dev/null @@ -1,368 +0,0 @@ -package filexfer - -// LStatPacket defines the SSH_FXP_LSTAT packet. -type LStatPacket struct { - Path string -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *LStatPacket) Type() PacketType { - return PacketTypeLStat -} - -// MarshalPacket returns p as a two-part binary encoding of p. -func (p *LStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.Path) // string(path) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeLStat, reqid) - buf.AppendString(p.Path) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *LStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err - } - - return nil -} - -// SetstatPacket defines the SSH_FXP_SETSTAT packet. -type SetstatPacket struct { - Path string - Attrs Attributes -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *SetstatPacket) Type() PacketType { - return PacketTypeSetstat -} - -// MarshalPacket returns p as a two-part binary encoding of p. 
-func (p *SetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.Path) + p.Attrs.Len() // string(path) + ATTRS(attrs) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeSetstat, reqid) - buf.AppendString(p.Path) - - p.Attrs.MarshalInto(buf) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *SetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err - } - - return p.Attrs.UnmarshalFrom(buf) -} - -// RemovePacket defines the SSH_FXP_REMOVE packet. -type RemovePacket struct { - Path string -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *RemovePacket) Type() PacketType { - return PacketTypeRemove -} - -// MarshalPacket returns p as a two-part binary encoding of p. -func (p *RemovePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.Path) // string(path) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeRemove, reqid) - buf.AppendString(p.Path) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *RemovePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err - } - - return nil -} - -// MkdirPacket defines the SSH_FXP_MKDIR packet. -type MkdirPacket struct { - Path string - Attrs Attributes -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *MkdirPacket) Type() PacketType { - return PacketTypeMkdir -} - -// MarshalPacket returns p as a two-part binary encoding of p. 
-func (p *MkdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.Path) + p.Attrs.Len() // string(path) + ATTRS(attrs) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeMkdir, reqid) - buf.AppendString(p.Path) - - p.Attrs.MarshalInto(buf) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *MkdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err - } - - return p.Attrs.UnmarshalFrom(buf) -} - -// RmdirPacket defines the SSH_FXP_RMDIR packet. -type RmdirPacket struct { - Path string -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *RmdirPacket) Type() PacketType { - return PacketTypeRmdir -} - -// MarshalPacket returns p as a two-part binary encoding of p. -func (p *RmdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.Path) // string(path) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeRmdir, reqid) - buf.AppendString(p.Path) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *RmdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err - } - - return nil -} - -// RealPathPacket defines the SSH_FXP_REALPATH packet. -type RealPathPacket struct { - Path string -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *RealPathPacket) Type() PacketType { - return PacketTypeRealPath -} - -// MarshalPacket returns p as a two-part binary encoding of p. 
-func (p *RealPathPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.Path) // string(path) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeRealPath, reqid) - buf.AppendString(p.Path) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *RealPathPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err - } - - return nil -} - -// StatPacket defines the SSH_FXP_STAT packet. -type StatPacket struct { - Path string -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *StatPacket) Type() PacketType { - return PacketTypeStat -} - -// MarshalPacket returns p as a two-part binary encoding of p. -func (p *StatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.Path) // string(path) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeStat, reqid) - buf.AppendString(p.Path) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *StatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err - } - - return nil -} - -// RenamePacket defines the SSH_FXP_RENAME packet. -type RenamePacket struct { - OldPath string - NewPath string -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *RenamePacket) Type() PacketType { - return PacketTypeRename -} - -// MarshalPacket returns p as a two-part binary encoding of p. 
-func (p *RenamePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - // string(oldpath) + string(newpath) - size := 4 + len(p.OldPath) + 4 + len(p.NewPath) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeRename, reqid) - buf.AppendString(p.OldPath) - buf.AppendString(p.NewPath) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *RenamePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.OldPath, err = buf.ConsumeString(); err != nil { - return err - } - - if p.NewPath, err = buf.ConsumeString(); err != nil { - return err - } - - return nil -} - -// ReadLinkPacket defines the SSH_FXP_READLINK packet. -type ReadLinkPacket struct { - Path string -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *ReadLinkPacket) Type() PacketType { - return PacketTypeReadLink -} - -// MarshalPacket returns p as a two-part binary encoding of p. -func (p *ReadLinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - size := 4 + len(p.Path) // string(path) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeReadLink, reqid) - buf.AppendString(p.Path) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *ReadLinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err - } - - return nil -} - -// SymlinkPacket defines the SSH_FXP_SYMLINK packet. -// -// The order of the arguments to the SSH_FXP_SYMLINK method was inadvertently reversed. -// Unfortunately, the reversal was not noticed until the server was widely deployed. 
-// Covered in Section 3.1 of https://github.com/openssh/openssh-portable/blob/master/PROTOCOL -type SymlinkPacket struct { - LinkPath string - TargetPath string -} - -// Type returns the SSH_FXP_xy value associated with this packet type. -func (p *SymlinkPacket) Type() PacketType { - return PacketTypeSymlink -} - -// MarshalPacket returns p as a two-part binary encoding of p. -func (p *SymlinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { - buf := NewBuffer(b) - if buf.Cap() < 9 { - // string(targetpath) + string(linkpath) - size := 4 + len(p.TargetPath) + 4 + len(p.LinkPath) - buf = NewMarshalBuffer(size) - } - - buf.StartPacket(PacketTypeSymlink, reqid) - - // Arguments were inadvertently reversed. - buf.AppendString(p.TargetPath) - buf.AppendString(p.LinkPath) - - return buf.Packet(payload) -} - -// UnmarshalPacketBody unmarshals the packet body from the given Buffer. -// It is assumed that the uint32(request-id) has already been consumed. -func (p *SymlinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - // Arguments were inadvertently reversed. - if p.TargetPath, err = buf.ConsumeString(); err != nil { - return err - } - - if p.LinkPath, err = buf.ConsumeString(); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go deleted file mode 100644 index 2fe63d59..00000000 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go +++ /dev/null @@ -1,114 +0,0 @@ -package filexfer - -// FileMode represents a file’s mode and permission bits. -// The bits are defined according to POSIX standards, -// and may not apply to the OS being built for. -type FileMode uint32 - -// Permission flags, defined here to avoid potential inconsistencies in individual OS implementations. 
-const ( - ModePerm FileMode = 0o0777 // S_IRWXU | S_IRWXG | S_IRWXO - ModeUserRead FileMode = 0o0400 // S_IRUSR - ModeUserWrite FileMode = 0o0200 // S_IWUSR - ModeUserExec FileMode = 0o0100 // S_IXUSR - ModeGroupRead FileMode = 0o0040 // S_IRGRP - ModeGroupWrite FileMode = 0o0020 // S_IWGRP - ModeGroupExec FileMode = 0o0010 // S_IXGRP - ModeOtherRead FileMode = 0o0004 // S_IROTH - ModeOtherWrite FileMode = 0o0002 // S_IWOTH - ModeOtherExec FileMode = 0o0001 // S_IXOTH - - ModeSetUID FileMode = 0o4000 // S_ISUID - ModeSetGID FileMode = 0o2000 // S_ISGID - ModeSticky FileMode = 0o1000 // S_ISVTX - - ModeType FileMode = 0xF000 // S_IFMT - ModeNamedPipe FileMode = 0x1000 // S_IFIFO - ModeCharDevice FileMode = 0x2000 // S_IFCHR - ModeDir FileMode = 0x4000 // S_IFDIR - ModeDevice FileMode = 0x6000 // S_IFBLK - ModeRegular FileMode = 0x8000 // S_IFREG - ModeSymlink FileMode = 0xA000 // S_IFLNK - ModeSocket FileMode = 0xC000 // S_IFSOCK -) - -// IsDir reports whether m describes a directory. -// That is, it tests for m.Type() == ModeDir. -func (m FileMode) IsDir() bool { - return (m & ModeType) == ModeDir -} - -// IsRegular reports whether m describes a regular file. -// That is, it tests for m.Type() == ModeRegular -func (m FileMode) IsRegular() bool { - return (m & ModeType) == ModeRegular -} - -// Perm returns the POSIX permission bits in m (m & ModePerm). -func (m FileMode) Perm() FileMode { - return (m & ModePerm) -} - -// Type returns the type bits in m (m & ModeType). -func (m FileMode) Type() FileMode { - return (m & ModeType) -} - -// String returns a `-rwxrwxrwx` style string representing the `ls -l` POSIX permissions string. 
-func (m FileMode) String() string { - var buf [10]byte - - switch m.Type() { - case ModeRegular: - buf[0] = '-' - case ModeDir: - buf[0] = 'd' - case ModeSymlink: - buf[0] = 'l' - case ModeDevice: - buf[0] = 'b' - case ModeCharDevice: - buf[0] = 'c' - case ModeNamedPipe: - buf[0] = 'p' - case ModeSocket: - buf[0] = 's' - default: - buf[0] = '?' - } - - const rwx = "rwxrwxrwx" - for i, c := range rwx { - if m&(1<>24), byte(v>>16), byte(v>>8), byte(v)) -} - -func marshalUint64(b []byte, v uint64) []byte { - return marshalUint32(marshalUint32(b, uint32(v>>32)), uint32(v)) -} - -func marshalString(b []byte, v string) []byte { - return append(marshalUint32(b, uint32(len(v))), v...) -} - -func marshalFileInfo(b []byte, fi os.FileInfo) []byte { - // attributes variable struct, and also variable per protocol version - // spec version 3 attributes: - // uint32 flags - // uint64 size present only if flag SSH_FILEXFER_ATTR_SIZE - // uint32 uid present only if flag SSH_FILEXFER_ATTR_UIDGID - // uint32 gid present only if flag SSH_FILEXFER_ATTR_UIDGID - // uint32 permissions present only if flag SSH_FILEXFER_ATTR_PERMISSIONS - // uint32 atime present only if flag SSH_FILEXFER_ACMODTIME - // uint32 mtime present only if flag SSH_FILEXFER_ACMODTIME - // uint32 extended_count present only if flag SSH_FILEXFER_ATTR_EXTENDED - // string extended_type - // string extended_data - // ... 
more extended data (extended_type - extended_data pairs), - // so that number of pairs equals extended_count - - flags, fileStat := fileStatFromInfo(fi) - - b = marshalUint32(b, flags) - if flags&sshFileXferAttrSize != 0 { - b = marshalUint64(b, fileStat.Size) - } - if flags&sshFileXferAttrUIDGID != 0 { - b = marshalUint32(b, fileStat.UID) - b = marshalUint32(b, fileStat.GID) - } - if flags&sshFileXferAttrPermissions != 0 { - b = marshalUint32(b, fileStat.Mode) - } - if flags&sshFileXferAttrACmodTime != 0 { - b = marshalUint32(b, fileStat.Atime) - b = marshalUint32(b, fileStat.Mtime) - } - - return b -} - -func marshalStatus(b []byte, err StatusError) []byte { - b = marshalUint32(b, err.Code) - b = marshalString(b, err.msg) - b = marshalString(b, err.lang) - return b -} - -func marshal(b []byte, v interface{}) []byte { - if v == nil { - return b - } - switch v := v.(type) { - case uint8: - return append(b, v) - case uint32: - return marshalUint32(b, v) - case uint64: - return marshalUint64(b, v) - case string: - return marshalString(b, v) - case os.FileInfo: - return marshalFileInfo(b, v) - default: - switch d := reflect.ValueOf(v); d.Kind() { - case reflect.Struct: - for i, n := 0, d.NumField(); i < n; i++ { - b = marshal(b, d.Field(i).Interface()) - } - return b - case reflect.Slice: - for i, n := 0, d.Len(); i < n; i++ { - b = marshal(b, d.Index(i).Interface()) - } - return b - default: - panic(fmt.Sprintf("marshal(%#v): cannot handle type %T", v, v)) - } - } -} - -func unmarshalUint32(b []byte) (uint32, []byte) { - v := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 - return v, b[4:] -} - -func unmarshalUint32Safe(b []byte) (uint32, []byte, error) { - var v uint32 - if len(b) < 4 { - return 0, nil, errShortPacket - } - v, b = unmarshalUint32(b) - return v, b, nil -} - -func unmarshalUint64(b []byte) (uint64, []byte) { - h, b := unmarshalUint32(b) - l, b := unmarshalUint32(b) - return uint64(h)<<32 | uint64(l), b -} - -func 
unmarshalUint64Safe(b []byte) (uint64, []byte, error) { - var v uint64 - if len(b) < 8 { - return 0, nil, errShortPacket - } - v, b = unmarshalUint64(b) - return v, b, nil -} - -func unmarshalString(b []byte) (string, []byte) { - n, b := unmarshalUint32(b) - return string(b[:n]), b[n:] -} - -func unmarshalStringSafe(b []byte) (string, []byte, error) { - n, b, err := unmarshalUint32Safe(b) - if err != nil { - return "", nil, err - } - if int64(n) > int64(len(b)) { - return "", nil, errShortPacket - } - return string(b[:n]), b[n:], nil -} - -func unmarshalAttrs(b []byte) (*FileStat, []byte) { - flags, b := unmarshalUint32(b) - return unmarshalFileStat(flags, b) -} - -func unmarshalFileStat(flags uint32, b []byte) (*FileStat, []byte) { - var fs FileStat - if flags&sshFileXferAttrSize == sshFileXferAttrSize { - fs.Size, b, _ = unmarshalUint64Safe(b) - } - if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { - fs.UID, b, _ = unmarshalUint32Safe(b) - } - if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { - fs.GID, b, _ = unmarshalUint32Safe(b) - } - if flags&sshFileXferAttrPermissions == sshFileXferAttrPermissions { - fs.Mode, b, _ = unmarshalUint32Safe(b) - } - if flags&sshFileXferAttrACmodTime == sshFileXferAttrACmodTime { - fs.Atime, b, _ = unmarshalUint32Safe(b) - fs.Mtime, b, _ = unmarshalUint32Safe(b) - } - if flags&sshFileXferAttrExtended == sshFileXferAttrExtended { - var count uint32 - count, b, _ = unmarshalUint32Safe(b) - ext := make([]StatExtended, count) - for i := uint32(0); i < count; i++ { - var typ string - var data string - typ, b, _ = unmarshalStringSafe(b) - data, b, _ = unmarshalStringSafe(b) - ext[i] = StatExtended{ - ExtType: typ, - ExtData: data, - } - } - fs.Extended = ext - } - return &fs, b -} - -func unmarshalStatus(id uint32, data []byte) error { - sid, data := unmarshalUint32(data) - if sid != id { - return &unexpectedIDErr{id, sid} - } - code, data := unmarshalUint32(data) - msg, data, _ := unmarshalStringSafe(data) - lang, _, _ 
:= unmarshalStringSafe(data) - return &StatusError{ - Code: code, - msg: msg, - lang: lang, - } -} - -type packetMarshaler interface { - marshalPacket() (header, payload []byte, err error) -} - -func marshalPacket(m encoding.BinaryMarshaler) (header, payload []byte, err error) { - if m, ok := m.(packetMarshaler); ok { - return m.marshalPacket() - } - - header, err = m.MarshalBinary() - return -} - -// sendPacket marshals p according to RFC 4234. -func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error { - header, payload, err := marshalPacket(m) - if err != nil { - return fmt.Errorf("binary marshaller failed: %w", err) - } - - length := len(header) + len(payload) - 4 // subtract the uint32(length) from the start - if debugDumpTxPacketBytes { - debug("send packet: %s %d bytes %x%x", fxp(header[4]), length, header[5:], payload) - } else if debugDumpTxPacket { - debug("send packet: %s %d bytes", fxp(header[4]), length) - } - - binary.BigEndian.PutUint32(header[:4], uint32(length)) - - if _, err := w.Write(header); err != nil { - return fmt.Errorf("failed to send packet: %w", err) - } - - if len(payload) > 0 { - if _, err := w.Write(payload); err != nil { - return fmt.Errorf("failed to send packet payload: %w", err) - } - } - - return nil -} - -func recvPacket(r io.Reader, alloc *allocator, orderID uint32) (uint8, []byte, error) { - var b []byte - if alloc != nil { - b = alloc.GetPage(orderID) - } else { - b = make([]byte, 4) - } - if _, err := io.ReadFull(r, b[:4]); err != nil { - return 0, nil, err - } - length, _ := unmarshalUint32(b) - if length > maxMsgLength { - debug("recv packet %d bytes too long", length) - return 0, nil, errLongPacket - } - if length == 0 { - debug("recv packet of 0 bytes too short") - return 0, nil, errShortPacket - } - if alloc == nil { - b = make([]byte, length) - } - if _, err := io.ReadFull(r, b[:length]); err != nil { - debug("recv packet %d bytes: err %v", length, err) - return 0, nil, err - } - if debugDumpRxPacketBytes { - 
debug("recv packet: %s %d bytes %x", fxp(b[0]), length, b[1:length]) - } else if debugDumpRxPacket { - debug("recv packet: %s %d bytes", fxp(b[0]), length) - } - return b[0], b[1:length], nil -} - -type extensionPair struct { - Name string - Data string -} - -func unmarshalExtensionPair(b []byte) (extensionPair, []byte, error) { - var ep extensionPair - var err error - ep.Name, b, err = unmarshalStringSafe(b) - if err != nil { - return ep, b, err - } - ep.Data, b, err = unmarshalStringSafe(b) - return ep, b, err -} - -// Here starts the definition of packets along with their MarshalBinary -// implementations. -// Manually writing the marshalling logic wins us a lot of time and -// allocation. - -type sshFxInitPacket struct { - Version uint32 - Extensions []extensionPair -} - -func (p *sshFxInitPacket) MarshalBinary() ([]byte, error) { - l := 4 + 1 + 4 // uint32(length) + byte(type) + uint32(version) - for _, e := range p.Extensions { - l += 4 + len(e.Name) + 4 + len(e.Data) - } - - b := make([]byte, 4, l) - b = append(b, sshFxpInit) - b = marshalUint32(b, p.Version) - - for _, e := range p.Extensions { - b = marshalString(b, e.Name) - b = marshalString(b, e.Data) - } - - return b, nil -} - -func (p *sshFxInitPacket) UnmarshalBinary(b []byte) error { - var err error - if p.Version, b, err = unmarshalUint32Safe(b); err != nil { - return err - } - for len(b) > 0 { - var ep extensionPair - ep, b, err = unmarshalExtensionPair(b) - if err != nil { - return err - } - p.Extensions = append(p.Extensions, ep) - } - return nil -} - -type sshFxVersionPacket struct { - Version uint32 - Extensions []sshExtensionPair -} - -type sshExtensionPair struct { - Name, Data string -} - -func (p *sshFxVersionPacket) MarshalBinary() ([]byte, error) { - l := 4 + 1 + 4 // uint32(length) + byte(type) + uint32(version) - for _, e := range p.Extensions { - l += 4 + len(e.Name) + 4 + len(e.Data) - } - - b := make([]byte, 4, l) - b = append(b, sshFxpVersion) - b = marshalUint32(b, p.Version) - - 
for _, e := range p.Extensions { - b = marshalString(b, e.Name) - b = marshalString(b, e.Data) - } - - return b, nil -} - -func marshalIDStringPacket(packetType byte, id uint32, str string) ([]byte, error) { - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + len(str) - - b := make([]byte, 4, l) - b = append(b, packetType) - b = marshalUint32(b, id) - b = marshalString(b, str) - - return b, nil -} - -func unmarshalIDString(b []byte, id *uint32, str *string) error { - var err error - *id, b, err = unmarshalUint32Safe(b) - if err != nil { - return err - } - *str, _, err = unmarshalStringSafe(b) - return err -} - -type sshFxpReaddirPacket struct { - ID uint32 - Handle string -} - -func (p *sshFxpReaddirPacket) id() uint32 { return p.ID } - -func (p *sshFxpReaddirPacket) MarshalBinary() ([]byte, error) { - return marshalIDStringPacket(sshFxpReaddir, p.ID, p.Handle) -} - -func (p *sshFxpReaddirPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Handle) -} - -type sshFxpOpendirPacket struct { - ID uint32 - Path string -} - -func (p *sshFxpOpendirPacket) id() uint32 { return p.ID } - -func (p *sshFxpOpendirPacket) MarshalBinary() ([]byte, error) { - return marshalIDStringPacket(sshFxpOpendir, p.ID, p.Path) -} - -func (p *sshFxpOpendirPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpLstatPacket struct { - ID uint32 - Path string -} - -func (p *sshFxpLstatPacket) id() uint32 { return p.ID } - -func (p *sshFxpLstatPacket) MarshalBinary() ([]byte, error) { - return marshalIDStringPacket(sshFxpLstat, p.ID, p.Path) -} - -func (p *sshFxpLstatPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpStatPacket struct { - ID uint32 - Path string -} - -func (p *sshFxpStatPacket) id() uint32 { return p.ID } - -func (p *sshFxpStatPacket) MarshalBinary() ([]byte, error) { - return marshalIDStringPacket(sshFxpStat, p.ID, p.Path) -} - 
-func (p *sshFxpStatPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpFstatPacket struct { - ID uint32 - Handle string -} - -func (p *sshFxpFstatPacket) id() uint32 { return p.ID } - -func (p *sshFxpFstatPacket) MarshalBinary() ([]byte, error) { - return marshalIDStringPacket(sshFxpFstat, p.ID, p.Handle) -} - -func (p *sshFxpFstatPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Handle) -} - -type sshFxpClosePacket struct { - ID uint32 - Handle string -} - -func (p *sshFxpClosePacket) id() uint32 { return p.ID } - -func (p *sshFxpClosePacket) MarshalBinary() ([]byte, error) { - return marshalIDStringPacket(sshFxpClose, p.ID, p.Handle) -} - -func (p *sshFxpClosePacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Handle) -} - -type sshFxpRemovePacket struct { - ID uint32 - Filename string -} - -func (p *sshFxpRemovePacket) id() uint32 { return p.ID } - -func (p *sshFxpRemovePacket) MarshalBinary() ([]byte, error) { - return marshalIDStringPacket(sshFxpRemove, p.ID, p.Filename) -} - -func (p *sshFxpRemovePacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Filename) -} - -type sshFxpRmdirPacket struct { - ID uint32 - Path string -} - -func (p *sshFxpRmdirPacket) id() uint32 { return p.ID } - -func (p *sshFxpRmdirPacket) MarshalBinary() ([]byte, error) { - return marshalIDStringPacket(sshFxpRmdir, p.ID, p.Path) -} - -func (p *sshFxpRmdirPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpSymlinkPacket struct { - ID uint32 - Targetpath string - Linkpath string -} - -func (p *sshFxpSymlinkPacket) id() uint32 { return p.ID } - -func (p *sshFxpSymlinkPacket) MarshalBinary() ([]byte, error) { - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + len(p.Targetpath) + - 4 + len(p.Linkpath) - - b := make([]byte, 4, l) - b = append(b, sshFxpSymlink) - b = 
marshalUint32(b, p.ID) - b = marshalString(b, p.Targetpath) - b = marshalString(b, p.Linkpath) - - return b, nil -} - -func (p *sshFxpSymlinkPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Targetpath, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Linkpath, _, err = unmarshalStringSafe(b); err != nil { - return err - } - return nil -} - -type sshFxpHardlinkPacket struct { - ID uint32 - Oldpath string - Newpath string -} - -func (p *sshFxpHardlinkPacket) id() uint32 { return p.ID } - -func (p *sshFxpHardlinkPacket) MarshalBinary() ([]byte, error) { - const ext = "hardlink@openssh.com" - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + len(ext) + - 4 + len(p.Oldpath) + - 4 + len(p.Newpath) - - b := make([]byte, 4, l) - b = append(b, sshFxpExtended) - b = marshalUint32(b, p.ID) - b = marshalString(b, ext) - b = marshalString(b, p.Oldpath) - b = marshalString(b, p.Newpath) - - return b, nil -} - -type sshFxpReadlinkPacket struct { - ID uint32 - Path string -} - -func (p *sshFxpReadlinkPacket) id() uint32 { return p.ID } - -func (p *sshFxpReadlinkPacket) MarshalBinary() ([]byte, error) { - return marshalIDStringPacket(sshFxpReadlink, p.ID, p.Path) -} - -func (p *sshFxpReadlinkPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpRealpathPacket struct { - ID uint32 - Path string -} - -func (p *sshFxpRealpathPacket) id() uint32 { return p.ID } - -func (p *sshFxpRealpathPacket) MarshalBinary() ([]byte, error) { - return marshalIDStringPacket(sshFxpRealpath, p.ID, p.Path) -} - -func (p *sshFxpRealpathPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpNameAttr struct { - Name string - LongName string - Attrs []interface{} -} - -func (p *sshFxpNameAttr) MarshalBinary() ([]byte, error) { - var b []byte - b = marshalString(b, p.Name) - b = 
marshalString(b, p.LongName) - for _, attr := range p.Attrs { - b = marshal(b, attr) - } - return b, nil -} - -type sshFxpNamePacket struct { - ID uint32 - NameAttrs []*sshFxpNameAttr -} - -func (p *sshFxpNamePacket) marshalPacket() ([]byte, []byte, error) { - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 - - b := make([]byte, 4, l) - b = append(b, sshFxpName) - b = marshalUint32(b, p.ID) - b = marshalUint32(b, uint32(len(p.NameAttrs))) - - var payload []byte - for _, na := range p.NameAttrs { - ab, err := na.MarshalBinary() - if err != nil { - return nil, nil, err - } - - payload = append(payload, ab...) - } - - return b, payload, nil -} - -func (p *sshFxpNamePacket) MarshalBinary() ([]byte, error) { - header, payload, err := p.marshalPacket() - return append(header, payload...), err -} - -type sshFxpOpenPacket struct { - ID uint32 - Path string - Pflags uint32 - Flags uint32 // ignored -} - -func (p *sshFxpOpenPacket) id() uint32 { return p.ID } - -func (p *sshFxpOpenPacket) MarshalBinary() ([]byte, error) { - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + len(p.Path) + - 4 + 4 - - b := make([]byte, 4, l) - b = append(b, sshFxpOpen) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Path) - b = marshalUint32(b, p.Pflags) - b = marshalUint32(b, p.Flags) - - return b, nil -} - -func (p *sshFxpOpenPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Pflags, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil { - return err - } - return nil -} - -type sshFxpReadPacket struct { - ID uint32 - Len uint32 - Offset uint64 - Handle string -} - -func (p *sshFxpReadPacket) id() uint32 { return p.ID } - -func (p *sshFxpReadPacket) MarshalBinary() ([]byte, error) { - l := 4 + 1 + 4 + // 
uint32(length) + byte(type) + uint32(id) - 4 + len(p.Handle) + - 8 + 4 // uint64 + uint32 - - b := make([]byte, 4, l) - b = append(b, sshFxpRead) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Handle) - b = marshalUint64(b, p.Offset) - b = marshalUint32(b, p.Len) - - return b, nil -} - -func (p *sshFxpReadPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { - return err - } else if p.Len, _, err = unmarshalUint32Safe(b); err != nil { - return err - } - return nil -} - -// We need allocate bigger slices with extra capacity to avoid a re-allocation in sshFxpDataPacket.MarshalBinary -// So, we need: uint32(length) + byte(type) + uint32(id) + uint32(data_length) -const dataHeaderLen = 4 + 1 + 4 + 4 - -func (p *sshFxpReadPacket) getDataSlice(alloc *allocator, orderID uint32) []byte { - dataLen := p.Len - if dataLen > maxTxPacket { - dataLen = maxTxPacket - } - - if alloc != nil { - // GetPage returns a slice with capacity = maxMsgLength this is enough to avoid new allocations in - // sshFxpDataPacket.MarshalBinary - return alloc.GetPage(orderID)[:dataLen] - } - - // allocate with extra space for the header - return make([]byte, dataLen, dataLen+dataHeaderLen) -} - -type sshFxpRenamePacket struct { - ID uint32 - Oldpath string - Newpath string -} - -func (p *sshFxpRenamePacket) id() uint32 { return p.ID } - -func (p *sshFxpRenamePacket) MarshalBinary() ([]byte, error) { - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + len(p.Oldpath) + - 4 + len(p.Newpath) - - b := make([]byte, 4, l) - b = append(b, sshFxpRename) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Oldpath) - b = marshalString(b, p.Newpath) - - return b, nil -} - -func (p *sshFxpRenamePacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, 
err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { - return err - } - return nil -} - -type sshFxpPosixRenamePacket struct { - ID uint32 - Oldpath string - Newpath string -} - -func (p *sshFxpPosixRenamePacket) id() uint32 { return p.ID } - -func (p *sshFxpPosixRenamePacket) MarshalBinary() ([]byte, error) { - const ext = "posix-rename@openssh.com" - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + len(ext) + - 4 + len(p.Oldpath) + - 4 + len(p.Newpath) - - b := make([]byte, 4, l) - b = append(b, sshFxpExtended) - b = marshalUint32(b, p.ID) - b = marshalString(b, ext) - b = marshalString(b, p.Oldpath) - b = marshalString(b, p.Newpath) - - return b, nil -} - -type sshFxpWritePacket struct { - ID uint32 - Length uint32 - Offset uint64 - Handle string - Data []byte -} - -func (p *sshFxpWritePacket) id() uint32 { return p.ID } - -func (p *sshFxpWritePacket) marshalPacket() ([]byte, []byte, error) { - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + len(p.Handle) + - 8 + // uint64 - 4 - - b := make([]byte, 4, l) - b = append(b, sshFxpWrite) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Handle) - b = marshalUint64(b, p.Offset) - b = marshalUint32(b, p.Length) - - return b, p.Data, nil -} - -func (p *sshFxpWritePacket) MarshalBinary() ([]byte, error) { - header, payload, err := p.marshalPacket() - return append(header, payload...), err -} - -func (p *sshFxpWritePacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { - return err - } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if uint32(len(b)) < p.Length { - return 
errShortPacket - } - - p.Data = b[:p.Length] - return nil -} - -type sshFxpMkdirPacket struct { - ID uint32 - Flags uint32 // ignored - Path string -} - -func (p *sshFxpMkdirPacket) id() uint32 { return p.ID } - -func (p *sshFxpMkdirPacket) MarshalBinary() ([]byte, error) { - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + len(p.Path) + - 4 // uint32 - - b := make([]byte, 4, l) - b = append(b, sshFxpMkdir) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Path) - b = marshalUint32(b, p.Flags) - - return b, nil -} - -func (p *sshFxpMkdirPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil { - return err - } - return nil -} - -type sshFxpSetstatPacket struct { - ID uint32 - Flags uint32 - Path string - Attrs interface{} -} - -type sshFxpFsetstatPacket struct { - ID uint32 - Flags uint32 - Handle string - Attrs interface{} -} - -func (p *sshFxpSetstatPacket) id() uint32 { return p.ID } -func (p *sshFxpFsetstatPacket) id() uint32 { return p.ID } - -func (p *sshFxpSetstatPacket) marshalPacket() ([]byte, []byte, error) { - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + len(p.Path) + - 4 // uint32 - - b := make([]byte, 4, l) - b = append(b, sshFxpSetstat) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Path) - b = marshalUint32(b, p.Flags) - - payload := marshal(nil, p.Attrs) - - return b, payload, nil -} - -func (p *sshFxpSetstatPacket) MarshalBinary() ([]byte, error) { - header, payload, err := p.marshalPacket() - return append(header, payload...), err -} - -func (p *sshFxpFsetstatPacket) marshalPacket() ([]byte, []byte, error) { - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + len(p.Handle) + - 4 // uint32 - - b := make([]byte, 4, l) - b = append(b, sshFxpFsetstat) - b = 
marshalUint32(b, p.ID) - b = marshalString(b, p.Handle) - b = marshalUint32(b, p.Flags) - - payload := marshal(nil, p.Attrs) - - return b, payload, nil -} - -func (p *sshFxpFsetstatPacket) MarshalBinary() ([]byte, error) { - header, payload, err := p.marshalPacket() - return append(header, payload...), err -} - -func (p *sshFxpSetstatPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { - return err - } - p.Attrs = b - return nil -} - -func (p *sshFxpFsetstatPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { - return err - } - p.Attrs = b - return nil -} - -type sshFxpHandlePacket struct { - ID uint32 - Handle string -} - -func (p *sshFxpHandlePacket) MarshalBinary() ([]byte, error) { - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + len(p.Handle) - - b := make([]byte, 4, l) - b = append(b, sshFxpHandle) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Handle) - - return b, nil -} - -type sshFxpStatusPacket struct { - ID uint32 - StatusError -} - -func (p *sshFxpStatusPacket) MarshalBinary() ([]byte, error) { - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + - 4 + len(p.StatusError.msg) + - 4 + len(p.StatusError.lang) - - b := make([]byte, 4, l) - b = append(b, sshFxpStatus) - b = marshalUint32(b, p.ID) - b = marshalStatus(b, p.StatusError) - - return b, nil -} - -type sshFxpDataPacket struct { - ID uint32 - Length uint32 - Data []byte -} - -func (p *sshFxpDataPacket) marshalPacket() ([]byte, []byte, error) { - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 - - b := 
make([]byte, 4, l) - b = append(b, sshFxpData) - b = marshalUint32(b, p.ID) - b = marshalUint32(b, p.Length) - - return b, p.Data, nil -} - -// MarshalBinary encodes the receiver into a binary form and returns the result. -// To avoid a new allocation the Data slice must have a capacity >= Length + 9 -// -// This is hand-coded rather than just append(header, payload...), -// in order to try and reuse the r.Data backing store in the packet. -func (p *sshFxpDataPacket) MarshalBinary() ([]byte, error) { - b := append(p.Data, make([]byte, dataHeaderLen)...) - copy(b[dataHeaderLen:], p.Data[:p.Length]) - // b[0:4] will be overwritten with the length in sendPacket - b[4] = sshFxpData - binary.BigEndian.PutUint32(b[5:9], p.ID) - binary.BigEndian.PutUint32(b[9:13], p.Length) - return b, nil -} - -func (p *sshFxpDataPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if uint32(len(b)) < p.Length { - return errShortPacket - } - - p.Data = b[:p.Length] - return nil -} - -type sshFxpStatvfsPacket struct { - ID uint32 - Path string -} - -func (p *sshFxpStatvfsPacket) id() uint32 { return p.ID } - -func (p *sshFxpStatvfsPacket) MarshalBinary() ([]byte, error) { - const ext = "statvfs@openssh.com" - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + len(ext) + - 4 + len(p.Path) - - b := make([]byte, 4, l) - b = append(b, sshFxpExtended) - b = marshalUint32(b, p.ID) - b = marshalString(b, ext) - b = marshalString(b, p.Path) - - return b, nil -} - -// A StatVFS contains statistics about a filesystem. 
-type StatVFS struct { - ID uint32 - Bsize uint64 /* file system block size */ - Frsize uint64 /* fundamental fs block size */ - Blocks uint64 /* number of blocks (unit f_frsize) */ - Bfree uint64 /* free blocks in file system */ - Bavail uint64 /* free blocks for non-root */ - Files uint64 /* total file inodes */ - Ffree uint64 /* free file inodes */ - Favail uint64 /* free file inodes for to non-root */ - Fsid uint64 /* file system id */ - Flag uint64 /* bit mask of f_flag values */ - Namemax uint64 /* maximum filename length */ -} - -// TotalSpace calculates the amount of total space in a filesystem. -func (p *StatVFS) TotalSpace() uint64 { - return p.Frsize * p.Blocks -} - -// FreeSpace calculates the amount of free space in a filesystem. -func (p *StatVFS) FreeSpace() uint64 { - return p.Frsize * p.Bfree -} - -// marshalPacket converts to ssh_FXP_EXTENDED_REPLY packet binary format -func (p *StatVFS) marshalPacket() ([]byte, []byte, error) { - header := []byte{0, 0, 0, 0, sshFxpExtendedReply} - - var buf bytes.Buffer - err := binary.Write(&buf, binary.BigEndian, p) - - return header, buf.Bytes(), err -} - -// MarshalBinary encodes the StatVFS as an SSH_FXP_EXTENDED_REPLY packet. 
-func (p *StatVFS) MarshalBinary() ([]byte, error) { - header, payload, err := p.marshalPacket() - return append(header, payload...), err -} - -type sshFxpFsyncPacket struct { - ID uint32 - Handle string -} - -func (p *sshFxpFsyncPacket) id() uint32 { return p.ID } - -func (p *sshFxpFsyncPacket) MarshalBinary() ([]byte, error) { - const ext = "fsync@openssh.com" - l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) - 4 + len(ext) + - 4 + len(p.Handle) - - b := make([]byte, 4, l) - b = append(b, sshFxpExtended) - b = marshalUint32(b, p.ID) - b = marshalString(b, ext) - b = marshalString(b, p.Handle) - - return b, nil -} - -type sshFxpExtendedPacket struct { - ID uint32 - ExtendedRequest string - SpecificPacket interface { - serverRespondablePacket - readonly() bool - } -} - -func (p *sshFxpExtendedPacket) id() uint32 { return p.ID } -func (p *sshFxpExtendedPacket) readonly() bool { - if p.SpecificPacket == nil { - return true - } - return p.SpecificPacket.readonly() -} - -func (p *sshFxpExtendedPacket) respond(svr *Server) responsePacket { - if p.SpecificPacket == nil { - return statusFromError(p.ID, nil) - } - return p.SpecificPacket.respond(svr) -} - -func (p *sshFxpExtendedPacket) UnmarshalBinary(b []byte) error { - var err error - bOrig := b - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.ExtendedRequest, _, err = unmarshalStringSafe(b); err != nil { - return err - } - - // specific unmarshalling - switch p.ExtendedRequest { - case "statvfs@openssh.com": - p.SpecificPacket = &sshFxpExtendedPacketStatVFS{} - case "posix-rename@openssh.com": - p.SpecificPacket = &sshFxpExtendedPacketPosixRename{} - case "hardlink@openssh.com": - p.SpecificPacket = &sshFxpExtendedPacketHardlink{} - default: - return fmt.Errorf("packet type %v: %w", p.SpecificPacket, errUnknownExtendedPacket) - } - - return p.SpecificPacket.UnmarshalBinary(bOrig) -} - -type sshFxpExtendedPacketStatVFS struct { - ID uint32 - ExtendedRequest string - Path 
string -} - -func (p *sshFxpExtendedPacketStatVFS) id() uint32 { return p.ID } -func (p *sshFxpExtendedPacketStatVFS) readonly() bool { return true } -func (p *sshFxpExtendedPacketStatVFS) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Path, _, err = unmarshalStringSafe(b); err != nil { - return err - } - return nil -} - -type sshFxpExtendedPacketPosixRename struct { - ID uint32 - ExtendedRequest string - Oldpath string - Newpath string -} - -func (p *sshFxpExtendedPacketPosixRename) id() uint32 { return p.ID } -func (p *sshFxpExtendedPacketPosixRename) readonly() bool { return false } -func (p *sshFxpExtendedPacketPosixRename) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { - return err - } - return nil -} - -func (p *sshFxpExtendedPacketPosixRename) respond(s *Server) responsePacket { - err := os.Rename(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) - return statusFromError(p.ID, err) -} - -type sshFxpExtendedPacketHardlink struct { - ID uint32 - ExtendedRequest string - Oldpath string - Newpath string -} - -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL -func (p *sshFxpExtendedPacketHardlink) id() uint32 { return p.ID } -func (p *sshFxpExtendedPacketHardlink) readonly() bool { return true } -func (p *sshFxpExtendedPacketHardlink) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Oldpath, 
b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { - return err - } - return nil -} - -func (p *sshFxpExtendedPacketHardlink) respond(s *Server) responsePacket { - err := os.Link(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) - return statusFromError(p.ID, err) -} diff --git a/vendor/github.com/pkg/sftp/pool.go b/vendor/github.com/pkg/sftp/pool.go deleted file mode 100644 index 36126290..00000000 --- a/vendor/github.com/pkg/sftp/pool.go +++ /dev/null @@ -1,79 +0,0 @@ -package sftp - -// bufPool provides a pool of byte-slices to be reused in various parts of the package. -// It is safe to use concurrently through a pointer. -type bufPool struct { - ch chan []byte - blen int -} - -func newBufPool(depth, bufLen int) *bufPool { - return &bufPool{ - ch: make(chan []byte, depth), - blen: bufLen, - } -} - -func (p *bufPool) Get() []byte { - if p.blen <= 0 { - panic("bufPool: new buffer creation length must be greater than zero") - } - - for { - select { - case b := <-p.ch: - if cap(b) < p.blen { - // just in case: throw away any buffer with insufficient capacity. - continue - } - - return b[:p.blen] - - default: - return make([]byte, p.blen) - } - } -} - -func (p *bufPool) Put(b []byte) { - if p == nil { - // functional default: no reuse. - return - } - - if cap(b) < p.blen || cap(b) > p.blen*2 { - // DO NOT reuse buffers with insufficient capacity. - // This could cause panics when resizing to p.blen. - - // DO NOT reuse buffers with excessive capacity. - // This could cause memory leaks. 
- return - } - - select { - case p.ch <- b: - default: - } -} - -type resChanPool chan chan result - -func newResChanPool(depth int) resChanPool { - return make(chan chan result, depth) -} - -func (p resChanPool) Get() chan result { - select { - case ch := <-p: - return ch - default: - return make(chan result, 1) - } -} - -func (p resChanPool) Put(ch chan result) { - select { - case p <- ch: - default: - } -} diff --git a/vendor/github.com/pkg/sftp/release.go b/vendor/github.com/pkg/sftp/release.go deleted file mode 100644 index b695528f..00000000 --- a/vendor/github.com/pkg/sftp/release.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !debug - -package sftp - -func debug(fmt string, args ...interface{}) {} diff --git a/vendor/github.com/pkg/sftp/request-attrs.go b/vendor/github.com/pkg/sftp/request-attrs.go deleted file mode 100644 index b5c95b4a..00000000 --- a/vendor/github.com/pkg/sftp/request-attrs.go +++ /dev/null @@ -1,63 +0,0 @@ -package sftp - -// Methods on the Request object to make working with the Flags bitmasks and -// Attr(ibutes) byte blob easier. Use Pflags() when working with an Open/Write -// request and AttrFlags() and Attributes() when working with SetStat requests. -import "os" - -// FileOpenFlags defines Open and Write Flags. Correlate directly with with os.OpenFile flags -// (https://golang.org/pkg/os/#pkg-constants). -type FileOpenFlags struct { - Read, Write, Append, Creat, Trunc, Excl bool -} - -func newFileOpenFlags(flags uint32) FileOpenFlags { - return FileOpenFlags{ - Read: flags&sshFxfRead != 0, - Write: flags&sshFxfWrite != 0, - Append: flags&sshFxfAppend != 0, - Creat: flags&sshFxfCreat != 0, - Trunc: flags&sshFxfTrunc != 0, - Excl: flags&sshFxfExcl != 0, - } -} - -// Pflags converts the bitmap/uint32 from SFTP Open packet pflag values, -// into a FileOpenFlags struct with booleans set for flags set in bitmap. 
-func (r *Request) Pflags() FileOpenFlags { - return newFileOpenFlags(r.Flags) -} - -// FileAttrFlags that indicate whether SFTP file attributes were passed. When a flag is -// true the corresponding attribute should be available from the FileStat -// object returned by Attributes method. Used with SetStat. -type FileAttrFlags struct { - Size, UidGid, Permissions, Acmodtime bool -} - -func newFileAttrFlags(flags uint32) FileAttrFlags { - return FileAttrFlags{ - Size: (flags & sshFileXferAttrSize) != 0, - UidGid: (flags & sshFileXferAttrUIDGID) != 0, - Permissions: (flags & sshFileXferAttrPermissions) != 0, - Acmodtime: (flags & sshFileXferAttrACmodTime) != 0, - } -} - -// AttrFlags returns a FileAttrFlags boolean struct based on the -// bitmap/uint32 file attribute flags from the SFTP packaet. -func (r *Request) AttrFlags() FileAttrFlags { - return newFileAttrFlags(r.Flags) -} - -// FileMode returns the Mode SFTP file attributes wrapped as os.FileMode -func (a FileStat) FileMode() os.FileMode { - return os.FileMode(a.Mode) -} - -// Attributes parses file attributes byte blob and return them in a -// FileStat object. -func (r *Request) Attributes() *FileStat { - fs, _ := unmarshalFileStat(r.Flags, r.Attrs) - return fs -} diff --git a/vendor/github.com/pkg/sftp/request-errors.go b/vendor/github.com/pkg/sftp/request-errors.go deleted file mode 100644 index 6505b5c7..00000000 --- a/vendor/github.com/pkg/sftp/request-errors.go +++ /dev/null @@ -1,54 +0,0 @@ -package sftp - -type fxerr uint32 - -// Error types that match the SFTP's SSH_FXP_STATUS codes. Gives you more -// direct control of the errors being sent vs. letting the library work them -// out from the standard os/io errors. 
-const ( - ErrSSHFxOk = fxerr(sshFxOk) - ErrSSHFxEOF = fxerr(sshFxEOF) - ErrSSHFxNoSuchFile = fxerr(sshFxNoSuchFile) - ErrSSHFxPermissionDenied = fxerr(sshFxPermissionDenied) - ErrSSHFxFailure = fxerr(sshFxFailure) - ErrSSHFxBadMessage = fxerr(sshFxBadMessage) - ErrSSHFxNoConnection = fxerr(sshFxNoConnection) - ErrSSHFxConnectionLost = fxerr(sshFxConnectionLost) - ErrSSHFxOpUnsupported = fxerr(sshFxOPUnsupported) -) - -// Deprecated error types, these are aliases for the new ones, please use the new ones directly -const ( - ErrSshFxOk = ErrSSHFxOk - ErrSshFxEof = ErrSSHFxEOF - ErrSshFxNoSuchFile = ErrSSHFxNoSuchFile - ErrSshFxPermissionDenied = ErrSSHFxPermissionDenied - ErrSshFxFailure = ErrSSHFxFailure - ErrSshFxBadMessage = ErrSSHFxBadMessage - ErrSshFxNoConnection = ErrSSHFxNoConnection - ErrSshFxConnectionLost = ErrSSHFxConnectionLost - ErrSshFxOpUnsupported = ErrSSHFxOpUnsupported -) - -func (e fxerr) Error() string { - switch e { - case ErrSSHFxOk: - return "OK" - case ErrSSHFxEOF: - return "EOF" - case ErrSSHFxNoSuchFile: - return "no such file" - case ErrSSHFxPermissionDenied: - return "permission denied" - case ErrSSHFxBadMessage: - return "bad message" - case ErrSSHFxNoConnection: - return "no connection" - case ErrSSHFxConnectionLost: - return "connection lost" - case ErrSSHFxOpUnsupported: - return "operation unsupported" - default: - return "failure" - } -} diff --git a/vendor/github.com/pkg/sftp/request-example.go b/vendor/github.com/pkg/sftp/request-example.go deleted file mode 100644 index ba22bcd0..00000000 --- a/vendor/github.com/pkg/sftp/request-example.go +++ /dev/null @@ -1,666 +0,0 @@ -package sftp - -// This serves as an example of how to implement the request server handler as -// well as a dummy backend for testing. It implements an in-memory backend that -// works as a very simple filesystem with simple flat key-value lookup system. 
- -import ( - "errors" - "io" - "os" - "path" - "sort" - "strings" - "sync" - "syscall" - "time" -) - -const maxSymlinkFollows = 5 - -var errTooManySymlinks = errors.New("too many symbolic links") - -// InMemHandler returns a Hanlders object with the test handlers. -func InMemHandler() Handlers { - root := &root{ - rootFile: &memFile{name: "/", modtime: time.Now(), isdir: true}, - files: make(map[string]*memFile), - } - return Handlers{root, root, root, root} -} - -// Example Handlers -func (fs *root) Fileread(r *Request) (io.ReaderAt, error) { - flags := r.Pflags() - if !flags.Read { - // sanity check - return nil, os.ErrInvalid - } - - return fs.OpenFile(r) -} - -func (fs *root) Filewrite(r *Request) (io.WriterAt, error) { - flags := r.Pflags() - if !flags.Write { - // sanity check - return nil, os.ErrInvalid - } - - return fs.OpenFile(r) -} - -func (fs *root) OpenFile(r *Request) (WriterAtReaderAt, error) { - if fs.mockErr != nil { - return nil, fs.mockErr - } - _ = r.WithContext(r.Context()) // initialize context for deadlock testing - - fs.mu.Lock() - defer fs.mu.Unlock() - - return fs.openfile(r.Filepath, r.Flags) -} - -func (fs *root) putfile(pathname string, file *memFile) error { - pathname, err := fs.canonName(pathname) - if err != nil { - return err - } - - if !strings.HasPrefix(pathname, "/") { - return os.ErrInvalid - } - - if _, err := fs.lfetch(pathname); err != os.ErrNotExist { - return os.ErrExist - } - - file.name = pathname - fs.files[pathname] = file - - return nil -} - -func (fs *root) openfile(pathname string, flags uint32) (*memFile, error) { - pflags := newFileOpenFlags(flags) - - file, err := fs.fetch(pathname) - if err == os.ErrNotExist { - if !pflags.Creat { - return nil, os.ErrNotExist - } - - var count int - // You can create files through dangling symlinks. 
- link, err := fs.lfetch(pathname) - for err == nil && link.symlink != "" { - if pflags.Excl { - // unless you also passed in O_EXCL - return nil, os.ErrInvalid - } - - if count++; count > maxSymlinkFollows { - return nil, errTooManySymlinks - } - - pathname = link.symlink - link, err = fs.lfetch(pathname) - } - - file := &memFile{ - modtime: time.Now(), - } - - if err := fs.putfile(pathname, file); err != nil { - return nil, err - } - - return file, nil - } - - if err != nil { - return nil, err - } - - if pflags.Creat && pflags.Excl { - return nil, os.ErrExist - } - - if file.IsDir() { - return nil, os.ErrInvalid - } - - if pflags.Trunc { - if err := file.Truncate(0); err != nil { - return nil, err - } - } - - return file, nil -} - -func (fs *root) Filecmd(r *Request) error { - if fs.mockErr != nil { - return fs.mockErr - } - _ = r.WithContext(r.Context()) // initialize context for deadlock testing - - fs.mu.Lock() - defer fs.mu.Unlock() - - switch r.Method { - case "Setstat": - file, err := fs.openfile(r.Filepath, sshFxfWrite) - if err != nil { - return err - } - - if r.AttrFlags().Size { - return file.Truncate(int64(r.Attributes().Size)) - } - - return nil - - case "Rename": - // SFTP-v2: "It is an error if there already exists a file with the name specified by newpath." - // This varies from the POSIX specification, which allows limited replacement of target files. - if fs.exists(r.Target) { - return os.ErrExist - } - - return fs.rename(r.Filepath, r.Target) - - case "Rmdir": - return fs.rmdir(r.Filepath) - - case "Remove": - // IEEE 1003.1 remove explicitly can unlink files and remove empty directories. - // We use instead here the semantics of unlink, which is allowed to be restricted against directories. - return fs.unlink(r.Filepath) - - case "Mkdir": - return fs.mkdir(r.Filepath) - - case "Link": - return fs.link(r.Filepath, r.Target) - - case "Symlink": - // NOTE: r.Filepath is the target, and r.Target is the linkpath. 
- return fs.symlink(r.Filepath, r.Target) - } - - return errors.New("unsupported") -} - -func (fs *root) rename(oldpath, newpath string) error { - file, err := fs.lfetch(oldpath) - if err != nil { - return err - } - - newpath, err = fs.canonName(newpath) - if err != nil { - return err - } - - if !strings.HasPrefix(newpath, "/") { - return os.ErrInvalid - } - - target, err := fs.lfetch(newpath) - if err != os.ErrNotExist { - if target == file { - // IEEE 1003.1: if oldpath and newpath are the same directory entry, - // then return no error, and perform no further action. - return nil - } - - switch { - case file.IsDir(): - // IEEE 1003.1: if oldpath is a directory, and newpath exists, - // then newpath must be a directory, and empty. - // It is to be removed prior to rename. - if err := fs.rmdir(newpath); err != nil { - return err - } - - case target.IsDir(): - // IEEE 1003.1: if oldpath is not a directory, and newpath exists, - // then newpath may not be a directory. - return syscall.EISDIR - } - } - - fs.files[newpath] = file - - if file.IsDir() { - dirprefix := file.name + "/" - - for name, file := range fs.files { - if strings.HasPrefix(name, dirprefix) { - newname := path.Join(newpath, strings.TrimPrefix(name, dirprefix)) - - fs.files[newname] = file - file.name = newname - delete(fs.files, name) - } - } - } - - file.name = newpath - delete(fs.files, oldpath) - - return nil -} - -func (fs *root) PosixRename(r *Request) error { - if fs.mockErr != nil { - return fs.mockErr - } - _ = r.WithContext(r.Context()) // initialize context for deadlock testing - - fs.mu.Lock() - defer fs.mu.Unlock() - - return fs.rename(r.Filepath, r.Target) -} - -func (fs *root) StatVFS(r *Request) (*StatVFS, error) { - if fs.mockErr != nil { - return nil, fs.mockErr - } - - return getStatVFSForPath(r.Filepath) -} - -func (fs *root) mkdir(pathname string) error { - dir := &memFile{ - modtime: time.Now(), - isdir: true, - } - - return fs.putfile(pathname, dir) -} - -func (fs *root) 
rmdir(pathname string) error { - // IEEE 1003.1: If pathname is a symlink, then rmdir should fail with ENOTDIR. - dir, err := fs.lfetch(pathname) - if err != nil { - return err - } - - if !dir.IsDir() { - return syscall.ENOTDIR - } - - // use the dir‘s internal name not the pathname we passed in. - // the dir.name is always the canonical name of a directory. - pathname = dir.name - - for name := range fs.files { - if path.Dir(name) == pathname { - return errors.New("directory not empty") - } - } - - delete(fs.files, pathname) - - return nil -} - -func (fs *root) link(oldpath, newpath string) error { - file, err := fs.lfetch(oldpath) - if err != nil { - return err - } - - if file.IsDir() { - return errors.New("hard link not allowed for directory") - } - - return fs.putfile(newpath, file) -} - -// symlink() creates a symbolic link named `linkpath` which contains the string `target`. -// NOTE! This would be called with `symlink(req.Filepath, req.Target)` due to different semantics. -func (fs *root) symlink(target, linkpath string) error { - link := &memFile{ - modtime: time.Now(), - symlink: target, - } - - return fs.putfile(linkpath, link) -} - -func (fs *root) unlink(pathname string) error { - // does not follow symlinks! - file, err := fs.lfetch(pathname) - if err != nil { - return err - } - - if file.IsDir() { - // IEEE 1003.1: implementations may opt out of allowing the unlinking of directories. - // SFTP-v2: SSH_FXP_REMOVE may not remove directories. - return os.ErrInvalid - } - - // DO NOT use the file’s internal name. - // because of hard-links files cannot have a single canonical name. 
- delete(fs.files, pathname) - - return nil -} - -type listerat []os.FileInfo - -// Modeled after strings.Reader's ReadAt() implementation -func (f listerat) ListAt(ls []os.FileInfo, offset int64) (int, error) { - var n int - if offset >= int64(len(f)) { - return 0, io.EOF - } - n = copy(ls, f[offset:]) - if n < len(ls) { - return n, io.EOF - } - return n, nil -} - -func (fs *root) Filelist(r *Request) (ListerAt, error) { - if fs.mockErr != nil { - return nil, fs.mockErr - } - _ = r.WithContext(r.Context()) // initialize context for deadlock testing - - fs.mu.Lock() - defer fs.mu.Unlock() - - switch r.Method { - case "List": - files, err := fs.readdir(r.Filepath) - if err != nil { - return nil, err - } - return listerat(files), nil - - case "Stat": - file, err := fs.fetch(r.Filepath) - if err != nil { - return nil, err - } - return listerat{file}, nil - - case "Readlink": - symlink, err := fs.readlink(r.Filepath) - if err != nil { - return nil, err - } - - // SFTP-v2: The server will respond with a SSH_FXP_NAME packet containing only - // one name and a dummy attributes value. - return listerat{ - &memFile{ - name: symlink, - err: os.ErrNotExist, // prevent accidental use as a reader/writer. 
- }, - }, nil - } - - return nil, errors.New("unsupported") -} - -func (fs *root) readdir(pathname string) ([]os.FileInfo, error) { - dir, err := fs.fetch(pathname) - if err != nil { - return nil, err - } - - if !dir.IsDir() { - return nil, syscall.ENOTDIR - } - - var files []os.FileInfo - - for name, file := range fs.files { - if path.Dir(name) == dir.name { - files = append(files, file) - } - } - - sort.Slice(files, func(i, j int) bool { return files[i].Name() < files[j].Name() }) - - return files, nil -} - -func (fs *root) readlink(pathname string) (string, error) { - file, err := fs.lfetch(pathname) - if err != nil { - return "", err - } - - if file.symlink == "" { - return "", os.ErrInvalid - } - - return file.symlink, nil -} - -// implements LstatFileLister interface -func (fs *root) Lstat(r *Request) (ListerAt, error) { - if fs.mockErr != nil { - return nil, fs.mockErr - } - _ = r.WithContext(r.Context()) // initialize context for deadlock testing - - fs.mu.Lock() - defer fs.mu.Unlock() - - file, err := fs.lfetch(r.Filepath) - if err != nil { - return nil, err - } - return listerat{file}, nil -} - -// implements RealpathFileLister interface -func (fs *root) Realpath(p string) string { - if fs.startDirectory == "" || fs.startDirectory == "/" { - return cleanPath(p) - } - return cleanPathWithBase(fs.startDirectory, p) -} - -// In memory file-system-y thing that the Hanlders live on -type root struct { - rootFile *memFile - mockErr error - startDirectory string - - mu sync.Mutex - files map[string]*memFile -} - -// Set a mocked error that the next handler call will return. -// Set to nil to reset for no error. 
-func (fs *root) returnErr(err error) { - fs.mockErr = err -} - -func (fs *root) lfetch(path string) (*memFile, error) { - if path == "/" { - return fs.rootFile, nil - } - - file, ok := fs.files[path] - if file == nil { - if ok { - delete(fs.files, path) - } - - return nil, os.ErrNotExist - } - - return file, nil -} - -// canonName returns the “canonical” name of a file, that is: -// if the directory of the pathname is a symlink, it follows that symlink to the valid directory name. -// this is relatively easy, since `dir.name` will be the only valid canonical path for a directory. -func (fs *root) canonName(pathname string) (string, error) { - dirname, filename := path.Dir(pathname), path.Base(pathname) - - dir, err := fs.fetch(dirname) - if err != nil { - return "", err - } - - if !dir.IsDir() { - return "", syscall.ENOTDIR - } - - return path.Join(dir.name, filename), nil -} - -func (fs *root) exists(path string) bool { - path, err := fs.canonName(path) - if err != nil { - return false - } - - _, err = fs.lfetch(path) - - return err != os.ErrNotExist -} - -func (fs *root) fetch(path string) (*memFile, error) { - file, err := fs.lfetch(path) - if err != nil { - return nil, err - } - - var count int - for file.symlink != "" { - if count++; count > maxSymlinkFollows { - return nil, errTooManySymlinks - } - - file, err = fs.lfetch(file.symlink) - if err != nil { - return nil, err - } - } - - return file, nil -} - -// Implements os.FileInfo, io.ReaderAt and io.WriterAt interfaces. -// These are the 3 interfaces necessary for the Handlers. -// Implements the optional interface TransferError. -type memFile struct { - name string - modtime time.Time - symlink string - isdir bool - - mu sync.RWMutex - content []byte - err error -} - -// These are helper functions, they must be called while holding the memFile.mu mutex -func (f *memFile) size() int64 { return int64(len(f.content)) } -func (f *memFile) grow(n int64) { f.content = append(f.content, make([]byte, n)...) 
} - -// Have memFile fulfill os.FileInfo interface -func (f *memFile) Name() string { return path.Base(f.name) } -func (f *memFile) Size() int64 { - f.mu.Lock() - defer f.mu.Unlock() - - return f.size() -} -func (f *memFile) Mode() os.FileMode { - if f.isdir { - return os.FileMode(0755) | os.ModeDir - } - if f.symlink != "" { - return os.FileMode(0777) | os.ModeSymlink - } - return os.FileMode(0644) -} -func (f *memFile) ModTime() time.Time { return f.modtime } -func (f *memFile) IsDir() bool { return f.isdir } -func (f *memFile) Sys() interface{} { - return fakeFileInfoSys() -} - -func (f *memFile) ReadAt(b []byte, off int64) (int, error) { - f.mu.Lock() - defer f.mu.Unlock() - - if f.err != nil { - return 0, f.err - } - - if off < 0 { - return 0, errors.New("memFile.ReadAt: negative offset") - } - - if off >= f.size() { - return 0, io.EOF - } - - n := copy(b, f.content[off:]) - if n < len(b) { - return n, io.EOF - } - - return n, nil -} - -func (f *memFile) WriteAt(b []byte, off int64) (int, error) { - // fmt.Println(string(p), off) - // mimic write delays, should be optional - time.Sleep(time.Microsecond * time.Duration(len(b))) - - f.mu.Lock() - defer f.mu.Unlock() - - if f.err != nil { - return 0, f.err - } - - grow := int64(len(b)) + off - f.size() - if grow > 0 { - f.grow(grow) - } - - return copy(f.content[off:], b), nil -} - -func (f *memFile) Truncate(size int64) error { - f.mu.Lock() - defer f.mu.Unlock() - - if f.err != nil { - return f.err - } - - grow := size - f.size() - if grow <= 0 { - f.content = f.content[:size] - } else { - f.grow(grow) - } - - return nil -} - -func (f *memFile) TransferError(err error) { - f.mu.Lock() - defer f.mu.Unlock() - - f.err = err -} diff --git a/vendor/github.com/pkg/sftp/request-interfaces.go b/vendor/github.com/pkg/sftp/request-interfaces.go deleted file mode 100644 index e5dc49bb..00000000 --- a/vendor/github.com/pkg/sftp/request-interfaces.go +++ /dev/null @@ -1,123 +0,0 @@ -package sftp - -import ( - "io" - "os" 
-) - -// WriterAtReaderAt defines the interface to return when a file is to -// be opened for reading and writing -type WriterAtReaderAt interface { - io.WriterAt - io.ReaderAt -} - -// Interfaces are differentiated based on required returned values. -// All input arguments are to be pulled from Request (the only arg). - -// The Handler interfaces all take the Request object as its only argument. -// All the data you should need to handle the call are in the Request object. -// The request.Method attribute is initially the most important one as it -// determines which Handler gets called. - -// FileReader should return an io.ReaderAt for the filepath -// Note in cases of an error, the error text will be sent to the client. -// Called for Methods: Get -type FileReader interface { - Fileread(*Request) (io.ReaderAt, error) -} - -// FileWriter should return an io.WriterAt for the filepath. -// -// The request server code will call Close() on the returned io.WriterAt -// ojbect if an io.Closer type assertion succeeds. -// Note in cases of an error, the error text will be sent to the client. -// Note when receiving an Append flag it is important to not open files using -// O_APPEND if you plan to use WriteAt, as they conflict. -// Called for Methods: Put, Open -type FileWriter interface { - Filewrite(*Request) (io.WriterAt, error) -} - -// OpenFileWriter is a FileWriter that implements the generic OpenFile method. -// You need to implement this optional interface if you want to be able -// to read and write from/to the same handle. -// Called for Methods: Open -type OpenFileWriter interface { - FileWriter - OpenFile(*Request) (WriterAtReaderAt, error) -} - -// FileCmder should return an error -// Note in cases of an error, the error text will be sent to the client. 
-// Called for Methods: Setstat, Rename, Rmdir, Mkdir, Link, Symlink, Remove -type FileCmder interface { - Filecmd(*Request) error -} - -// PosixRenameFileCmder is a FileCmder that implements the PosixRename method. -// If this interface is implemented PosixRename requests will call it -// otherwise they will be handled in the same way as Rename -type PosixRenameFileCmder interface { - FileCmder - PosixRename(*Request) error -} - -// StatVFSFileCmder is a FileCmder that implements the StatVFS method. -// You need to implement this interface if you want to handle statvfs requests. -// Please also be sure that the statvfs@openssh.com extension is enabled -type StatVFSFileCmder interface { - FileCmder - StatVFS(*Request) (*StatVFS, error) -} - -// FileLister should return an object that fulfils the ListerAt interface -// Note in cases of an error, the error text will be sent to the client. -// Called for Methods: List, Stat, Readlink -type FileLister interface { - Filelist(*Request) (ListerAt, error) -} - -// LstatFileLister is a FileLister that implements the Lstat method. -// If this interface is implemented Lstat requests will call it -// otherwise they will be handled in the same way as Stat -type LstatFileLister interface { - FileLister - Lstat(*Request) (ListerAt, error) -} - -// RealPathFileLister is a FileLister that implements the Realpath method. -// We use "/" as start directory for relative paths, implementing this -// interface you can customize the start directory. -// You have to return an absolute POSIX path. -// -// Deprecated: if you want to set a start directory use WithStartDirectory RequestServerOption instead. -type RealPathFileLister interface { - FileLister - RealPath(string) string -} - -// NameLookupFileLister is a FileLister that implmeents the LookupUsername and LookupGroupName methods. -// If this interface is implemented, then longname ls formatting will use these to convert usernames and groupnames. 
-type NameLookupFileLister interface { - FileLister - LookupUserName(string) string - LookupGroupName(string) string -} - -// ListerAt does for file lists what io.ReaderAt does for files. -// ListAt should return the number of entries copied and an io.EOF -// error if at end of list. This is testable by comparing how many you -// copied to how many could be copied (eg. n < len(ls) below). -// The copy() builtin is best for the copying. -// Note in cases of an error, the error text will be sent to the client. -type ListerAt interface { - ListAt([]os.FileInfo, int64) (int, error) -} - -// TransferError is an optional interface that readerAt and writerAt -// can implement to be notified about the error causing Serve() to exit -// with the request still open -type TransferError interface { - TransferError(err error) -} diff --git a/vendor/github.com/pkg/sftp/request-plan9.go b/vendor/github.com/pkg/sftp/request-plan9.go deleted file mode 100644 index 2444da59..00000000 --- a/vendor/github.com/pkg/sftp/request-plan9.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build plan9 - -package sftp - -import ( - "path" - "path/filepath" - "syscall" -) - -func fakeFileInfoSys() interface{} { - return &syscall.Dir{} -} - -func testOsSys(sys interface{}) error { - return nil -} - -func toLocalPath(p string) string { - lp := filepath.FromSlash(p) - - if path.IsAbs(p) { - tmp := lp[1:] - - if filepath.IsAbs(tmp) { - // If the FromSlash without any starting slashes is absolute, - // then we have a filepath encoded with a prefix '/'. - // e.g. "/#s/boot" to "#s/boot" - return tmp - } - } - - return lp -} diff --git a/vendor/github.com/pkg/sftp/request-readme.md b/vendor/github.com/pkg/sftp/request-readme.md deleted file mode 100644 index f887274d..00000000 --- a/vendor/github.com/pkg/sftp/request-readme.md +++ /dev/null @@ -1,53 +0,0 @@ -# Request Based SFTP API - -The request based API allows for custom backends in a way similar to the http -package. 
In order to create a backend you need to implement 4 handler -interfaces; one for reading, one for writing, one for misc commands and one for -listing files. Each has 1 required method and in each case those methods take -the Request as the only parameter and they each return something different. -These 4 interfaces are enough to handle all the SFTP traffic in a simplified -manner. - -The Request structure has 5 public fields which you will deal with. - -- Method (string) - string name of incoming call -- Filepath (string) - POSIX path of file to act on -- Flags (uint32) - 32bit bitmask value of file open/create flags -- Attrs ([]byte) - byte string of file attribute data -- Target (string) - target path for renames and sym-links - -Below are the methods and a brief description of what they need to do. - -### Fileread(*Request) (io.Reader, error) - -Handler for "Get" method and returns an io.Reader for the file which the server -then sends to the client. - -### Filewrite(*Request) (io.Writer, error) - -Handler for "Put" method and returns an io.Writer for the file which the server -then writes the uploaded file to. The file opening "pflags" are currently -preserved in the Request.Flags field as a 32bit bitmask value. See the [SFTP -spec](https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.3) for -details. - -### Filecmd(*Request) error - -Handles "SetStat", "Rename", "Rmdir", "Mkdir" and "Symlink" methods. Makes the -appropriate changes and returns nil for success or an filesystem like error -(eg. os.ErrNotExist). The attributes are currently propagated in their raw form -([]byte) and will need to be unmarshalled to be useful. See the respond method -on sshFxpSetstatPacket for example of you might want to do this. - -### Fileinfo(*Request) ([]os.FileInfo, error) - -Handles "List", "Stat", "Readlink" methods. Gathers/creates FileInfo structs -with the data on the files and returns in a list (list of 1 for Stat and -Readlink). 
- - -## TODO - -- Add support for API users to see trace/debugging info of what is going on -inside SFTP server. -- Unmarshal the file attributes into a structure on the Request object. diff --git a/vendor/github.com/pkg/sftp/request-server.go b/vendor/github.com/pkg/sftp/request-server.go deleted file mode 100644 index b7dadd6c..00000000 --- a/vendor/github.com/pkg/sftp/request-server.go +++ /dev/null @@ -1,328 +0,0 @@ -package sftp - -import ( - "context" - "errors" - "io" - "path" - "path/filepath" - "strconv" - "sync" -) - -var maxTxPacket uint32 = 1 << 15 - -// Handlers contains the 4 SFTP server request handlers. -type Handlers struct { - FileGet FileReader - FilePut FileWriter - FileCmd FileCmder - FileList FileLister -} - -// RequestServer abstracts the sftp protocol with an http request-like protocol -type RequestServer struct { - Handlers Handlers - - *serverConn - pktMgr *packetManager - - startDirectory string - - mu sync.RWMutex - handleCount int - openRequests map[string]*Request -} - -// A RequestServerOption is a function which applies configuration to a RequestServer. -type RequestServerOption func(*RequestServer) - -// WithRSAllocator enable the allocator. -// After processing a packet we keep in memory the allocated slices -// and we reuse them for new packets. -// The allocator is experimental -func WithRSAllocator() RequestServerOption { - return func(rs *RequestServer) { - alloc := newAllocator() - rs.pktMgr.alloc = alloc - rs.conn.alloc = alloc - } -} - -// WithStartDirectory sets a start directory to use as base for relative paths. -// If unset the default is "/" -func WithStartDirectory(startDirectory string) RequestServerOption { - return func(rs *RequestServer) { - rs.startDirectory = cleanPath(startDirectory) - } -} - -// NewRequestServer creates/allocates/returns new RequestServer. -// Normally there will be one server per user-session. 
-func NewRequestServer(rwc io.ReadWriteCloser, h Handlers, options ...RequestServerOption) *RequestServer { - svrConn := &serverConn{ - conn: conn{ - Reader: rwc, - WriteCloser: rwc, - }, - } - rs := &RequestServer{ - Handlers: h, - - serverConn: svrConn, - pktMgr: newPktMgr(svrConn), - - startDirectory: "/", - - openRequests: make(map[string]*Request), - } - - for _, o := range options { - o(rs) - } - return rs -} - -// New Open packet/Request -func (rs *RequestServer) nextRequest(r *Request) string { - rs.mu.Lock() - defer rs.mu.Unlock() - - rs.handleCount++ - - r.handle = strconv.Itoa(rs.handleCount) - rs.openRequests[r.handle] = r - - return r.handle -} - -// Returns Request from openRequests, bool is false if it is missing. -// -// The Requests in openRequests work essentially as open file descriptors that -// you can do different things with. What you are doing with it are denoted by -// the first packet of that type (read/write/etc). -func (rs *RequestServer) getRequest(handle string) (*Request, bool) { - rs.mu.RLock() - defer rs.mu.RUnlock() - - r, ok := rs.openRequests[handle] - return r, ok -} - -// Close the Request and clear from openRequests map -func (rs *RequestServer) closeRequest(handle string) error { - rs.mu.Lock() - defer rs.mu.Unlock() - - if r, ok := rs.openRequests[handle]; ok { - delete(rs.openRequests, handle) - return r.close() - } - - return EBADF -} - -// Close the read/write/closer to trigger exiting the main server loop -func (rs *RequestServer) Close() error { return rs.conn.Close() } - -func (rs *RequestServer) serveLoop(pktChan chan<- orderedRequest) error { - defer close(pktChan) // shuts down sftpServerWorkers - - var err error - var pkt requestPacket - var pktType uint8 - var pktBytes []byte - - for { - pktType, pktBytes, err = rs.serverConn.recvPacket(rs.pktMgr.getNextOrderID()) - if err != nil { - // we don't care about releasing allocated pages here, the server will quit and the allocator freed - return err - } - - pkt, err = 
makePacket(rxPacket{fxp(pktType), pktBytes}) - if err != nil { - switch { - case errors.Is(err, errUnknownExtendedPacket): - // do nothing - default: - debug("makePacket err: %v", err) - rs.conn.Close() // shuts down recvPacket - return err - } - } - - pktChan <- rs.pktMgr.newOrderedRequest(pkt) - } -} - -// Serve requests for user session -func (rs *RequestServer) Serve() error { - defer func() { - if rs.pktMgr.alloc != nil { - rs.pktMgr.alloc.Free() - } - }() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var wg sync.WaitGroup - runWorker := func(ch chan orderedRequest) { - wg.Add(1) - go func() { - defer wg.Done() - if err := rs.packetWorker(ctx, ch); err != nil { - rs.conn.Close() // shuts down recvPacket - } - }() - } - pktChan := rs.pktMgr.workerChan(runWorker) - - err := rs.serveLoop(pktChan) - - wg.Wait() // wait for all workers to exit - - rs.mu.Lock() - defer rs.mu.Unlock() - - // make sure all open requests are properly closed - // (eg. possible on dropped connections, client crashes, etc.) 
- for handle, req := range rs.openRequests { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - req.transferError(err) - - delete(rs.openRequests, handle) - req.close() - } - - return err -} - -func (rs *RequestServer) packetWorker(ctx context.Context, pktChan chan orderedRequest) error { - for pkt := range pktChan { - orderID := pkt.orderID() - if epkt, ok := pkt.requestPacket.(*sshFxpExtendedPacket); ok { - if epkt.SpecificPacket != nil { - pkt.requestPacket = epkt.SpecificPacket - } - } - - var rpkt responsePacket - switch pkt := pkt.requestPacket.(type) { - case *sshFxInitPacket: - rpkt = &sshFxVersionPacket{Version: sftpProtocolVersion, Extensions: sftpExtensions} - case *sshFxpClosePacket: - handle := pkt.getHandle() - rpkt = statusFromError(pkt.ID, rs.closeRequest(handle)) - case *sshFxpRealpathPacket: - var realPath string - if realPather, ok := rs.Handlers.FileList.(RealPathFileLister); ok { - realPath = realPather.RealPath(pkt.getPath()) - } else { - realPath = cleanPathWithBase(rs.startDirectory, pkt.getPath()) - } - rpkt = cleanPacketPath(pkt, realPath) - case *sshFxpOpendirPacket: - request := requestFromPacket(ctx, pkt, rs.startDirectory) - handle := rs.nextRequest(request) - rpkt = request.opendir(rs.Handlers, pkt) - if _, ok := rpkt.(*sshFxpHandlePacket); !ok { - // if we return an error we have to remove the handle from the active ones - rs.closeRequest(handle) - } - case *sshFxpOpenPacket: - request := requestFromPacket(ctx, pkt, rs.startDirectory) - handle := rs.nextRequest(request) - rpkt = request.open(rs.Handlers, pkt) - if _, ok := rpkt.(*sshFxpHandlePacket); !ok { - // if we return an error we have to remove the handle from the active ones - rs.closeRequest(handle) - } - case *sshFxpFstatPacket: - handle := pkt.getHandle() - request, ok := rs.getRequest(handle) - if !ok { - rpkt = statusFromError(pkt.ID, EBADF) - } else { - request = &Request{ - Method: "Stat", - Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath), - } - 
rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) - } - case *sshFxpFsetstatPacket: - handle := pkt.getHandle() - request, ok := rs.getRequest(handle) - if !ok { - rpkt = statusFromError(pkt.ID, EBADF) - } else { - request = &Request{ - Method: "Setstat", - Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath), - } - rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) - } - case *sshFxpExtendedPacketPosixRename: - request := &Request{ - Method: "PosixRename", - Filepath: cleanPathWithBase(rs.startDirectory, pkt.Oldpath), - Target: cleanPathWithBase(rs.startDirectory, pkt.Newpath), - } - rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) - case *sshFxpExtendedPacketStatVFS: - request := &Request{ - Method: "StatVFS", - Filepath: cleanPathWithBase(rs.startDirectory, pkt.Path), - } - rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) - case hasHandle: - handle := pkt.getHandle() - request, ok := rs.getRequest(handle) - if !ok { - rpkt = statusFromError(pkt.id(), EBADF) - } else { - rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) - } - case hasPath: - request := requestFromPacket(ctx, pkt, rs.startDirectory) - rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) - request.close() - default: - rpkt = statusFromError(pkt.id(), ErrSSHFxOpUnsupported) - } - - rs.pktMgr.readyPacket( - rs.pktMgr.newOrderedResponse(rpkt, orderID)) - } - return nil -} - -// clean and return name packet for file -func cleanPacketPath(pkt *sshFxpRealpathPacket, realPath string) responsePacket { - return &sshFxpNamePacket{ - ID: pkt.id(), - NameAttrs: []*sshFxpNameAttr{ - { - Name: realPath, - LongName: realPath, - Attrs: emptyFileStat, - }, - }, - } -} - -// Makes sure we have a clean POSIX (/) absolute path to work with -func cleanPath(p string) string { - return cleanPathWithBase("/", p) -} - -func cleanPathWithBase(base, p string) string { - p = filepath.ToSlash(filepath.Clean(p)) - if !path.IsAbs(p) { - 
return path.Join(base, p) - } - return p -} diff --git a/vendor/github.com/pkg/sftp/request-unix.go b/vendor/github.com/pkg/sftp/request-unix.go deleted file mode 100644 index 50b08a38..00000000 --- a/vendor/github.com/pkg/sftp/request-unix.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !windows,!plan9 - -package sftp - -import ( - "errors" - "syscall" -) - -func fakeFileInfoSys() interface{} { - return &syscall.Stat_t{Uid: 65534, Gid: 65534} -} - -func testOsSys(sys interface{}) error { - fstat := sys.(*FileStat) - if fstat.UID != uint32(65534) { - return errors.New("Uid failed to match") - } - if fstat.GID != uint32(65534) { - return errors.New("Gid failed to match") - } - return nil -} - -func toLocalPath(p string) string { - return p -} diff --git a/vendor/github.com/pkg/sftp/request.go b/vendor/github.com/pkg/sftp/request.go deleted file mode 100644 index 116c27aa..00000000 --- a/vendor/github.com/pkg/sftp/request.go +++ /dev/null @@ -1,630 +0,0 @@ -package sftp - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "strings" - "sync" - "syscall" -) - -// MaxFilelist is the max number of files to return in a readdir batch. -var MaxFilelist int64 = 100 - -// state encapsulates the reader/writer/readdir from handlers. -type state struct { - mu sync.RWMutex - - writerAt io.WriterAt - readerAt io.ReaderAt - writerAtReaderAt WriterAtReaderAt - listerAt ListerAt - lsoffset int64 -} - -// copy returns a shallow copy the state. -// This is broken out to specific fields, -// because we have to copy around the mutex in state. 
-func (s *state) copy() state { - s.mu.RLock() - defer s.mu.RUnlock() - - return state{ - writerAt: s.writerAt, - readerAt: s.readerAt, - writerAtReaderAt: s.writerAtReaderAt, - listerAt: s.listerAt, - lsoffset: s.lsoffset, - } -} - -func (s *state) setReaderAt(rd io.ReaderAt) { - s.mu.Lock() - defer s.mu.Unlock() - - s.readerAt = rd -} - -func (s *state) getReaderAt() io.ReaderAt { - s.mu.RLock() - defer s.mu.RUnlock() - - return s.readerAt -} - -func (s *state) setWriterAt(rd io.WriterAt) { - s.mu.Lock() - defer s.mu.Unlock() - - s.writerAt = rd -} - -func (s *state) getWriterAt() io.WriterAt { - s.mu.RLock() - defer s.mu.RUnlock() - - return s.writerAt -} - -func (s *state) setWriterAtReaderAt(rw WriterAtReaderAt) { - s.mu.Lock() - defer s.mu.Unlock() - - s.writerAtReaderAt = rw -} - -func (s *state) getWriterAtReaderAt() WriterAtReaderAt { - s.mu.RLock() - defer s.mu.RUnlock() - - return s.writerAtReaderAt -} - -func (s *state) getAllReaderWriters() (io.ReaderAt, io.WriterAt, WriterAtReaderAt) { - s.mu.RLock() - defer s.mu.RUnlock() - - return s.readerAt, s.writerAt, s.writerAtReaderAt -} - -// Returns current offset for file list -func (s *state) lsNext() int64 { - s.mu.RLock() - defer s.mu.RUnlock() - - return s.lsoffset -} - -// Increases next offset -func (s *state) lsInc(offset int64) { - s.mu.Lock() - defer s.mu.Unlock() - - s.lsoffset += offset -} - -// manage file read/write state -func (s *state) setListerAt(la ListerAt) { - s.mu.Lock() - defer s.mu.Unlock() - - s.listerAt = la -} - -func (s *state) getListerAt() ListerAt { - s.mu.RLock() - defer s.mu.RUnlock() - - return s.listerAt -} - -// Request contains the data and state for the incoming service request. 
-type Request struct { - // Get, Put, Setstat, Stat, Rename, Remove - // Rmdir, Mkdir, List, Readlink, Link, Symlink - Method string - Filepath string - Flags uint32 - Attrs []byte // convert to sub-struct - Target string // for renames and sym-links - handle string - - // reader/writer/readdir from handlers - state - - // context lasts duration of request - ctx context.Context - cancelCtx context.CancelFunc -} - -// NewRequest creates a new Request object. -func NewRequest(method, path string) *Request { - return &Request{ - Method: method, - Filepath: cleanPath(path), - } -} - -// copy returns a shallow copy of existing request. -// This is broken out to specific fields, -// because we have to copy around the mutex in state. -func (r *Request) copy() *Request { - return &Request{ - Method: r.Method, - Filepath: r.Filepath, - Flags: r.Flags, - Attrs: r.Attrs, - Target: r.Target, - handle: r.handle, - - state: r.state.copy(), - - ctx: r.ctx, - cancelCtx: r.cancelCtx, - } -} - -// New Request initialized based on packet data -func requestFromPacket(ctx context.Context, pkt hasPath, baseDir string) *Request { - request := &Request{ - Method: requestMethod(pkt), - Filepath: cleanPathWithBase(baseDir, pkt.getPath()), - } - request.ctx, request.cancelCtx = context.WithCancel(ctx) - - switch p := pkt.(type) { - case *sshFxpOpenPacket: - request.Flags = p.Pflags - case *sshFxpSetstatPacket: - request.Flags = p.Flags - request.Attrs = p.Attrs.([]byte) - case *sshFxpRenamePacket: - request.Target = cleanPathWithBase(baseDir, p.Newpath) - case *sshFxpSymlinkPacket: - // NOTE: given a POSIX compliant signature: symlink(target, linkpath string) - // this makes Request.Target the linkpath, and Request.Filepath the target. - request.Target = cleanPathWithBase(baseDir, p.Linkpath) - case *sshFxpExtendedPacketHardlink: - request.Target = cleanPathWithBase(baseDir, p.Newpath) - } - return request -} - -// Context returns the request's context. 
To change the context, -// use WithContext. -// -// The returned context is always non-nil; it defaults to the -// background context. -// -// For incoming server requests, the context is canceled when the -// request is complete or the client's connection closes. -func (r *Request) Context() context.Context { - if r.ctx != nil { - return r.ctx - } - return context.Background() -} - -// WithContext returns a copy of r with its context changed to ctx. -// The provided ctx must be non-nil. -func (r *Request) WithContext(ctx context.Context) *Request { - if ctx == nil { - panic("nil context") - } - r2 := r.copy() - r2.ctx = ctx - r2.cancelCtx = nil - return r2 -} - -// Close reader/writer if possible -func (r *Request) close() error { - defer func() { - if r.cancelCtx != nil { - r.cancelCtx() - } - }() - - rd, wr, rw := r.getAllReaderWriters() - - var err error - - // Close errors on a Writer are far more likely to be the important one. - // As they can be information that there was a loss of data. 
- if c, ok := wr.(io.Closer); ok { - if err2 := c.Close(); err == nil { - // update error if it is still nil - err = err2 - } - } - - if c, ok := rw.(io.Closer); ok { - if err2 := c.Close(); err == nil { - // update error if it is still nil - err = err2 - - r.setWriterAtReaderAt(nil) - } - } - - if c, ok := rd.(io.Closer); ok { - if err2 := c.Close(); err == nil { - // update error if it is still nil - err = err2 - } - } - - return err -} - -// Notify transfer error if any -func (r *Request) transferError(err error) { - if err == nil { - return - } - - rd, wr, rw := r.getAllReaderWriters() - - if t, ok := wr.(TransferError); ok { - t.TransferError(err) - } - - if t, ok := rw.(TransferError); ok { - t.TransferError(err) - } - - if t, ok := rd.(TransferError); ok { - t.TransferError(err) - } -} - -// called from worker to handle packet/request -func (r *Request) call(handlers Handlers, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { - switch r.Method { - case "Get": - return fileget(handlers.FileGet, r, pkt, alloc, orderID) - case "Put": - return fileput(handlers.FilePut, r, pkt, alloc, orderID) - case "Open": - return fileputget(handlers.FilePut, r, pkt, alloc, orderID) - case "Setstat", "Rename", "Rmdir", "Mkdir", "Link", "Symlink", "Remove", "PosixRename", "StatVFS": - return filecmd(handlers.FileCmd, r, pkt) - case "List": - return filelist(handlers.FileList, r, pkt) - case "Stat", "Lstat", "Readlink": - return filestat(handlers.FileList, r, pkt) - default: - return statusFromError(pkt.id(), fmt.Errorf("unexpected method: %s", r.Method)) - } -} - -// Additional initialization for Open packets -func (r *Request) open(h Handlers, pkt requestPacket) responsePacket { - flags := r.Pflags() - - id := pkt.id() - - switch { - case flags.Write, flags.Append, flags.Creat, flags.Trunc: - if flags.Read { - if openFileWriter, ok := h.FilePut.(OpenFileWriter); ok { - r.Method = "Open" - rw, err := openFileWriter.OpenFile(r) - if err != nil { - return 
statusFromError(id, err) - } - - r.setWriterAtReaderAt(rw) - - return &sshFxpHandlePacket{ - ID: id, - Handle: r.handle, - } - } - } - - r.Method = "Put" - wr, err := h.FilePut.Filewrite(r) - if err != nil { - return statusFromError(id, err) - } - - r.setWriterAt(wr) - - case flags.Read: - r.Method = "Get" - rd, err := h.FileGet.Fileread(r) - if err != nil { - return statusFromError(id, err) - } - - r.setReaderAt(rd) - - default: - return statusFromError(id, errors.New("bad file flags")) - } - - return &sshFxpHandlePacket{ - ID: id, - Handle: r.handle, - } -} - -func (r *Request) opendir(h Handlers, pkt requestPacket) responsePacket { - r.Method = "List" - la, err := h.FileList.Filelist(r) - if err != nil { - return statusFromError(pkt.id(), wrapPathError(r.Filepath, err)) - } - - r.setListerAt(la) - - return &sshFxpHandlePacket{ - ID: pkt.id(), - Handle: r.handle, - } -} - -// wrap FileReader handler -func fileget(h FileReader, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { - rd := r.getReaderAt() - if rd == nil { - return statusFromError(pkt.id(), errors.New("unexpected read packet")) - } - - data, offset, _ := packetData(pkt, alloc, orderID) - - n, err := rd.ReadAt(data, offset) - // only return EOF error if no data left to read - if err != nil && (err != io.EOF || n == 0) { - return statusFromError(pkt.id(), err) - } - - return &sshFxpDataPacket{ - ID: pkt.id(), - Length: uint32(n), - Data: data[:n], - } -} - -// wrap FileWriter handler -func fileput(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { - wr := r.getWriterAt() - if wr == nil { - return statusFromError(pkt.id(), errors.New("unexpected write packet")) - } - - data, offset, _ := packetData(pkt, alloc, orderID) - - _, err := wr.WriteAt(data, offset) - return statusFromError(pkt.id(), err) -} - -// wrap OpenFileWriter handler -func fileputget(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) 
responsePacket { - rw := r.getWriterAtReaderAt() - if rw == nil { - return statusFromError(pkt.id(), errors.New("unexpected write and read packet")) - } - - switch p := pkt.(type) { - case *sshFxpReadPacket: - data, offset := p.getDataSlice(alloc, orderID), int64(p.Offset) - - n, err := rw.ReadAt(data, offset) - // only return EOF error if no data left to read - if err != nil && (err != io.EOF || n == 0) { - return statusFromError(pkt.id(), err) - } - - return &sshFxpDataPacket{ - ID: pkt.id(), - Length: uint32(n), - Data: data[:n], - } - - case *sshFxpWritePacket: - data, offset := p.Data, int64(p.Offset) - - _, err := rw.WriteAt(data, offset) - return statusFromError(pkt.id(), err) - - default: - return statusFromError(pkt.id(), errors.New("unexpected packet type for read or write")) - } -} - -// file data for additional read/write packets -func packetData(p requestPacket, alloc *allocator, orderID uint32) (data []byte, offset int64, length uint32) { - switch p := p.(type) { - case *sshFxpReadPacket: - return p.getDataSlice(alloc, orderID), int64(p.Offset), p.Len - case *sshFxpWritePacket: - return p.Data, int64(p.Offset), p.Length - } - return -} - -// wrap FileCmder handler -func filecmd(h FileCmder, r *Request, pkt requestPacket) responsePacket { - switch p := pkt.(type) { - case *sshFxpFsetstatPacket: - r.Flags = p.Flags - r.Attrs = p.Attrs.([]byte) - } - - switch r.Method { - case "PosixRename": - if posixRenamer, ok := h.(PosixRenameFileCmder); ok { - err := posixRenamer.PosixRename(r) - return statusFromError(pkt.id(), err) - } - - // PosixRenameFileCmder not implemented handle this request as a Rename - r.Method = "Rename" - err := h.Filecmd(r) - return statusFromError(pkt.id(), err) - - case "StatVFS": - if statVFSCmdr, ok := h.(StatVFSFileCmder); ok { - stat, err := statVFSCmdr.StatVFS(r) - if err != nil { - return statusFromError(pkt.id(), err) - } - stat.ID = pkt.id() - return stat - } - - return statusFromError(pkt.id(), ErrSSHFxOpUnsupported) - } - 
- err := h.Filecmd(r) - return statusFromError(pkt.id(), err) -} - -// wrap FileLister handler -func filelist(h FileLister, r *Request, pkt requestPacket) responsePacket { - lister := r.getListerAt() - if lister == nil { - return statusFromError(pkt.id(), errors.New("unexpected dir packet")) - } - - offset := r.lsNext() - finfo := make([]os.FileInfo, MaxFilelist) - n, err := lister.ListAt(finfo, offset) - r.lsInc(int64(n)) - // ignore EOF as we only return it when there are no results - finfo = finfo[:n] // avoid need for nil tests below - - switch r.Method { - case "List": - if err != nil && (err != io.EOF || n == 0) { - return statusFromError(pkt.id(), err) - } - - nameAttrs := make([]*sshFxpNameAttr, 0, len(finfo)) - - // If the type conversion fails, we get untyped `nil`, - // which is handled by not looking up any names. - idLookup, _ := h.(NameLookupFileLister) - - for _, fi := range finfo { - nameAttrs = append(nameAttrs, &sshFxpNameAttr{ - Name: fi.Name(), - LongName: runLs(idLookup, fi), - Attrs: []interface{}{fi}, - }) - } - - return &sshFxpNamePacket{ - ID: pkt.id(), - NameAttrs: nameAttrs, - } - - default: - err = fmt.Errorf("unexpected method: %s", r.Method) - return statusFromError(pkt.id(), err) - } -} - -func filestat(h FileLister, r *Request, pkt requestPacket) responsePacket { - var lister ListerAt - var err error - - if r.Method == "Lstat" { - if lstatFileLister, ok := h.(LstatFileLister); ok { - lister, err = lstatFileLister.Lstat(r) - } else { - // LstatFileLister not implemented handle this request as a Stat - r.Method = "Stat" - lister, err = h.Filelist(r) - } - } else { - lister, err = h.Filelist(r) - } - if err != nil { - return statusFromError(pkt.id(), err) - } - finfo := make([]os.FileInfo, 1) - n, err := lister.ListAt(finfo, 0) - finfo = finfo[:n] // avoid need for nil tests below - - switch r.Method { - case "Stat", "Lstat": - if err != nil && err != io.EOF { - return statusFromError(pkt.id(), err) - } - if n == 0 { - err = 
&os.PathError{ - Op: strings.ToLower(r.Method), - Path: r.Filepath, - Err: syscall.ENOENT, - } - return statusFromError(pkt.id(), err) - } - return &sshFxpStatResponse{ - ID: pkt.id(), - info: finfo[0], - } - case "Readlink": - if err != nil && err != io.EOF { - return statusFromError(pkt.id(), err) - } - if n == 0 { - err = &os.PathError{ - Op: "readlink", - Path: r.Filepath, - Err: syscall.ENOENT, - } - return statusFromError(pkt.id(), err) - } - filename := finfo[0].Name() - return &sshFxpNamePacket{ - ID: pkt.id(), - NameAttrs: []*sshFxpNameAttr{ - { - Name: filename, - LongName: filename, - Attrs: emptyFileStat, - }, - }, - } - default: - err = fmt.Errorf("unexpected method: %s", r.Method) - return statusFromError(pkt.id(), err) - } -} - -// init attributes of request object from packet data -func requestMethod(p requestPacket) (method string) { - switch p.(type) { - case *sshFxpReadPacket, *sshFxpWritePacket, *sshFxpOpenPacket: - // set in open() above - case *sshFxpOpendirPacket, *sshFxpReaddirPacket: - // set in opendir() above - case *sshFxpSetstatPacket, *sshFxpFsetstatPacket: - method = "Setstat" - case *sshFxpRenamePacket: - method = "Rename" - case *sshFxpSymlinkPacket: - method = "Symlink" - case *sshFxpRemovePacket: - method = "Remove" - case *sshFxpStatPacket, *sshFxpFstatPacket: - method = "Stat" - case *sshFxpLstatPacket: - method = "Lstat" - case *sshFxpRmdirPacket: - method = "Rmdir" - case *sshFxpReadlinkPacket: - method = "Readlink" - case *sshFxpMkdirPacket: - method = "Mkdir" - case *sshFxpExtendedPacketHardlink: - method = "Link" - } - return method -} diff --git a/vendor/github.com/pkg/sftp/request_windows.go b/vendor/github.com/pkg/sftp/request_windows.go deleted file mode 100644 index 1f6d3df1..00000000 --- a/vendor/github.com/pkg/sftp/request_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -package sftp - -import ( - "path" - "path/filepath" - "syscall" -) - -func fakeFileInfoSys() interface{} { - return syscall.Win32FileAttributeData{} -} - 
-func testOsSys(sys interface{}) error { - return nil -} - -func toLocalPath(p string) string { - lp := filepath.FromSlash(p) - - if path.IsAbs(p) { - tmp := lp - for len(tmp) > 0 && tmp[0] == '\\' { - tmp = tmp[1:] - } - - if filepath.IsAbs(tmp) { - // If the FromSlash without any starting slashes is absolute, - // then we have a filepath encoded with a prefix '/'. - // e.g. "/C:/Windows" to "C:\\Windows" - return tmp - } - - tmp += "\\" - - if filepath.IsAbs(tmp) { - // If the FromSlash without any starting slashes but with extra end slash is absolute, - // then we have a filepath encoded with a prefix '/' and a dropped '/' at the end. - // e.g. "/C:" to "C:\\" - return tmp - } - } - - return lp -} diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go deleted file mode 100644 index 529052b4..00000000 --- a/vendor/github.com/pkg/sftp/server.go +++ /dev/null @@ -1,616 +0,0 @@ -package sftp - -// sftp server counterpart - -import ( - "encoding" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "sync" - "syscall" - "time" -) - -const ( - // SftpServerWorkerCount defines the number of workers for the SFTP server - SftpServerWorkerCount = 8 -) - -// Server is an SSH File Transfer Protocol (sftp) server. -// This is intended to provide the sftp subsystem to an ssh server daemon. 
-// This implementation currently supports most of sftp server protocol version 3, -// as specified at http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 -type Server struct { - *serverConn - debugStream io.Writer - readOnly bool - pktMgr *packetManager - openFiles map[string]*os.File - openFilesLock sync.RWMutex - handleCount int -} - -func (svr *Server) nextHandle(f *os.File) string { - svr.openFilesLock.Lock() - defer svr.openFilesLock.Unlock() - svr.handleCount++ - handle := strconv.Itoa(svr.handleCount) - svr.openFiles[handle] = f - return handle -} - -func (svr *Server) closeHandle(handle string) error { - svr.openFilesLock.Lock() - defer svr.openFilesLock.Unlock() - if f, ok := svr.openFiles[handle]; ok { - delete(svr.openFiles, handle) - return f.Close() - } - - return EBADF -} - -func (svr *Server) getHandle(handle string) (*os.File, bool) { - svr.openFilesLock.RLock() - defer svr.openFilesLock.RUnlock() - f, ok := svr.openFiles[handle] - return f, ok -} - -type serverRespondablePacket interface { - encoding.BinaryUnmarshaler - id() uint32 - respond(svr *Server) responsePacket -} - -// NewServer creates a new Server instance around the provided streams, serving -// content from the root of the filesystem. Optionally, ServerOption -// functions may be specified to further configure the Server. -// -// A subsequent call to Serve() is required to begin serving files over SFTP. -func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) { - svrConn := &serverConn{ - conn: conn{ - Reader: rwc, - WriteCloser: rwc, - }, - } - s := &Server{ - serverConn: svrConn, - debugStream: ioutil.Discard, - pktMgr: newPktMgr(svrConn), - openFiles: make(map[string]*os.File), - } - - for _, o := range options { - if err := o(s); err != nil { - return nil, err - } - } - - return s, nil -} - -// A ServerOption is a function which applies configuration to a Server. 
-type ServerOption func(*Server) error - -// WithDebug enables Server debugging output to the supplied io.Writer. -func WithDebug(w io.Writer) ServerOption { - return func(s *Server) error { - s.debugStream = w - return nil - } -} - -// ReadOnly configures a Server to serve files in read-only mode. -func ReadOnly() ServerOption { - return func(s *Server) error { - s.readOnly = true - return nil - } -} - -// WithAllocator enable the allocator. -// After processing a packet we keep in memory the allocated slices -// and we reuse them for new packets. -// The allocator is experimental -func WithAllocator() ServerOption { - return func(s *Server) error { - alloc := newAllocator() - s.pktMgr.alloc = alloc - s.conn.alloc = alloc - return nil - } -} - -type rxPacket struct { - pktType fxp - pktBytes []byte -} - -// Up to N parallel servers -func (svr *Server) sftpServerWorker(pktChan chan orderedRequest) error { - for pkt := range pktChan { - // readonly checks - readonly := true - switch pkt := pkt.requestPacket.(type) { - case notReadOnly: - readonly = false - case *sshFxpOpenPacket: - readonly = pkt.readonly() - case *sshFxpExtendedPacket: - readonly = pkt.readonly() - } - - // If server is operating read-only and a write operation is requested, - // return permission denied - if !readonly && svr.readOnly { - svr.pktMgr.readyPacket( - svr.pktMgr.newOrderedResponse(statusFromError(pkt.id(), syscall.EPERM), pkt.orderID()), - ) - continue - } - - if err := handlePacket(svr, pkt); err != nil { - return err - } - } - return nil -} - -func handlePacket(s *Server, p orderedRequest) error { - var rpkt responsePacket - orderID := p.orderID() - switch p := p.requestPacket.(type) { - case *sshFxInitPacket: - rpkt = &sshFxVersionPacket{ - Version: sftpProtocolVersion, - Extensions: sftpExtensions, - } - case *sshFxpStatPacket: - // stat the requested file - info, err := os.Stat(toLocalPath(p.Path)) - rpkt = &sshFxpStatResponse{ - ID: p.ID, - info: info, - } - if err != nil { - 
rpkt = statusFromError(p.ID, err) - } - case *sshFxpLstatPacket: - // stat the requested file - info, err := os.Lstat(toLocalPath(p.Path)) - rpkt = &sshFxpStatResponse{ - ID: p.ID, - info: info, - } - if err != nil { - rpkt = statusFromError(p.ID, err) - } - case *sshFxpFstatPacket: - f, ok := s.getHandle(p.Handle) - var err error = EBADF - var info os.FileInfo - if ok { - info, err = f.Stat() - rpkt = &sshFxpStatResponse{ - ID: p.ID, - info: info, - } - } - if err != nil { - rpkt = statusFromError(p.ID, err) - } - case *sshFxpMkdirPacket: - // TODO FIXME: ignore flags field - err := os.Mkdir(toLocalPath(p.Path), 0755) - rpkt = statusFromError(p.ID, err) - case *sshFxpRmdirPacket: - err := os.Remove(toLocalPath(p.Path)) - rpkt = statusFromError(p.ID, err) - case *sshFxpRemovePacket: - err := os.Remove(toLocalPath(p.Filename)) - rpkt = statusFromError(p.ID, err) - case *sshFxpRenamePacket: - err := os.Rename(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) - rpkt = statusFromError(p.ID, err) - case *sshFxpSymlinkPacket: - err := os.Symlink(toLocalPath(p.Targetpath), toLocalPath(p.Linkpath)) - rpkt = statusFromError(p.ID, err) - case *sshFxpClosePacket: - rpkt = statusFromError(p.ID, s.closeHandle(p.Handle)) - case *sshFxpReadlinkPacket: - f, err := os.Readlink(toLocalPath(p.Path)) - rpkt = &sshFxpNamePacket{ - ID: p.ID, - NameAttrs: []*sshFxpNameAttr{ - { - Name: f, - LongName: f, - Attrs: emptyFileStat, - }, - }, - } - if err != nil { - rpkt = statusFromError(p.ID, err) - } - case *sshFxpRealpathPacket: - f, err := filepath.Abs(toLocalPath(p.Path)) - f = cleanPath(f) - rpkt = &sshFxpNamePacket{ - ID: p.ID, - NameAttrs: []*sshFxpNameAttr{ - { - Name: f, - LongName: f, - Attrs: emptyFileStat, - }, - }, - } - if err != nil { - rpkt = statusFromError(p.ID, err) - } - case *sshFxpOpendirPacket: - p.Path = toLocalPath(p.Path) - - if stat, err := os.Stat(p.Path); err != nil { - rpkt = statusFromError(p.ID, err) - } else if !stat.IsDir() { - rpkt = statusFromError(p.ID, 
&os.PathError{ - Path: p.Path, Err: syscall.ENOTDIR}) - } else { - rpkt = (&sshFxpOpenPacket{ - ID: p.ID, - Path: p.Path, - Pflags: sshFxfRead, - }).respond(s) - } - case *sshFxpReadPacket: - var err error = EBADF - f, ok := s.getHandle(p.Handle) - if ok { - err = nil - data := p.getDataSlice(s.pktMgr.alloc, orderID) - n, _err := f.ReadAt(data, int64(p.Offset)) - if _err != nil && (_err != io.EOF || n == 0) { - err = _err - } - rpkt = &sshFxpDataPacket{ - ID: p.ID, - Length: uint32(n), - Data: data[:n], - // do not use data[:n:n] here to clamp the capacity, we allocated extra capacity above to avoid reallocations - } - } - if err != nil { - rpkt = statusFromError(p.ID, err) - } - - case *sshFxpWritePacket: - f, ok := s.getHandle(p.Handle) - var err error = EBADF - if ok { - _, err = f.WriteAt(p.Data, int64(p.Offset)) - } - rpkt = statusFromError(p.ID, err) - case *sshFxpExtendedPacket: - if p.SpecificPacket == nil { - rpkt = statusFromError(p.ID, ErrSSHFxOpUnsupported) - } else { - rpkt = p.respond(s) - } - case serverRespondablePacket: - rpkt = p.respond(s) - default: - return fmt.Errorf("unexpected packet type %T", p) - } - - s.pktMgr.readyPacket(s.pktMgr.newOrderedResponse(rpkt, orderID)) - return nil -} - -// Serve serves SFTP connections until the streams stop or the SFTP subsystem -// is stopped. 
-func (svr *Server) Serve() error { - defer func() { - if svr.pktMgr.alloc != nil { - svr.pktMgr.alloc.Free() - } - }() - var wg sync.WaitGroup - runWorker := func(ch chan orderedRequest) { - wg.Add(1) - go func() { - defer wg.Done() - if err := svr.sftpServerWorker(ch); err != nil { - svr.conn.Close() // shuts down recvPacket - } - }() - } - pktChan := svr.pktMgr.workerChan(runWorker) - - var err error - var pkt requestPacket - var pktType uint8 - var pktBytes []byte - for { - pktType, pktBytes, err = svr.serverConn.recvPacket(svr.pktMgr.getNextOrderID()) - if err != nil { - // we don't care about releasing allocated pages here, the server will quit and the allocator freed - break - } - - pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) - if err != nil { - switch { - case errors.Is(err, errUnknownExtendedPacket): - //if err := svr.serverConn.sendError(pkt, ErrSshFxOpUnsupported); err != nil { - // debug("failed to send err packet: %v", err) - // svr.conn.Close() // shuts down recvPacket - // break - //} - default: - debug("makePacket err: %v", err) - svr.conn.Close() // shuts down recvPacket - break - } - } - - pktChan <- svr.pktMgr.newOrderedRequest(pkt) - } - - close(pktChan) // shuts down sftpServerWorkers - wg.Wait() // wait for all workers to exit - - // close any still-open files - for handle, file := range svr.openFiles { - fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name()) - file.Close() - } - return err // error from recvPacket -} - -type ider interface { - id() uint32 -} - -// The init packet has no ID, so we just return a zero-value ID -func (p *sshFxInitPacket) id() uint32 { return 0 } - -type sshFxpStatResponse struct { - ID uint32 - info os.FileInfo -} - -func (p *sshFxpStatResponse) marshalPacket() ([]byte, []byte, error) { - l := 4 + 1 + 4 // uint32(length) + byte(type) + uint32(id) - - b := make([]byte, 4, l) - b = append(b, sshFxpAttrs) - b = marshalUint32(b, p.ID) - - var payload []byte - 
payload = marshalFileInfo(payload, p.info) - - return b, payload, nil -} - -func (p *sshFxpStatResponse) MarshalBinary() ([]byte, error) { - header, payload, err := p.marshalPacket() - return append(header, payload...), err -} - -var emptyFileStat = []interface{}{uint32(0)} - -func (p *sshFxpOpenPacket) readonly() bool { - return !p.hasPflags(sshFxfWrite) -} - -func (p *sshFxpOpenPacket) hasPflags(flags ...uint32) bool { - for _, f := range flags { - if p.Pflags&f == 0 { - return false - } - } - return true -} - -func (p *sshFxpOpenPacket) respond(svr *Server) responsePacket { - var osFlags int - if p.hasPflags(sshFxfRead, sshFxfWrite) { - osFlags |= os.O_RDWR - } else if p.hasPflags(sshFxfWrite) { - osFlags |= os.O_WRONLY - } else if p.hasPflags(sshFxfRead) { - osFlags |= os.O_RDONLY - } else { - // how are they opening? - return statusFromError(p.ID, syscall.EINVAL) - } - - // Don't use O_APPEND flag as it conflicts with WriteAt. - // The sshFxfAppend flag is a no-op here as the client sends the offsets. 
- - if p.hasPflags(sshFxfCreat) { - osFlags |= os.O_CREATE - } - if p.hasPflags(sshFxfTrunc) { - osFlags |= os.O_TRUNC - } - if p.hasPflags(sshFxfExcl) { - osFlags |= os.O_EXCL - } - - f, err := os.OpenFile(toLocalPath(p.Path), osFlags, 0644) - if err != nil { - return statusFromError(p.ID, err) - } - - handle := svr.nextHandle(f) - return &sshFxpHandlePacket{ID: p.ID, Handle: handle} -} - -func (p *sshFxpReaddirPacket) respond(svr *Server) responsePacket { - f, ok := svr.getHandle(p.Handle) - if !ok { - return statusFromError(p.ID, EBADF) - } - - dirents, err := f.Readdir(128) - if err != nil { - return statusFromError(p.ID, err) - } - - idLookup := osIDLookup{} - - ret := &sshFxpNamePacket{ID: p.ID} - for _, dirent := range dirents { - ret.NameAttrs = append(ret.NameAttrs, &sshFxpNameAttr{ - Name: dirent.Name(), - LongName: runLs(idLookup, dirent), - Attrs: []interface{}{dirent}, - }) - } - return ret -} - -func (p *sshFxpSetstatPacket) respond(svr *Server) responsePacket { - // additional unmarshalling is required for each possibility here - b := p.Attrs.([]byte) - var err error - - p.Path = toLocalPath(p.Path) - - debug("setstat name \"%s\"", p.Path) - if (p.Flags & sshFileXferAttrSize) != 0 { - var size uint64 - if size, b, err = unmarshalUint64Safe(b); err == nil { - err = os.Truncate(p.Path, int64(size)) - } - } - if (p.Flags & sshFileXferAttrPermissions) != 0 { - var mode uint32 - if mode, b, err = unmarshalUint32Safe(b); err == nil { - err = os.Chmod(p.Path, os.FileMode(mode)) - } - } - if (p.Flags & sshFileXferAttrACmodTime) != 0 { - var atime uint32 - var mtime uint32 - if atime, b, err = unmarshalUint32Safe(b); err != nil { - } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { - } else { - atimeT := time.Unix(int64(atime), 0) - mtimeT := time.Unix(int64(mtime), 0) - err = os.Chtimes(p.Path, atimeT, mtimeT) - } - } - if (p.Flags & sshFileXferAttrUIDGID) != 0 { - var uid uint32 - var gid uint32 - if uid, b, err = unmarshalUint32Safe(b); err != 
nil { - } else if gid, _, err = unmarshalUint32Safe(b); err != nil { - } else { - err = os.Chown(p.Path, int(uid), int(gid)) - } - } - - return statusFromError(p.ID, err) -} - -func (p *sshFxpFsetstatPacket) respond(svr *Server) responsePacket { - f, ok := svr.getHandle(p.Handle) - if !ok { - return statusFromError(p.ID, EBADF) - } - - // additional unmarshalling is required for each possibility here - b := p.Attrs.([]byte) - var err error - - debug("fsetstat name \"%s\"", f.Name()) - if (p.Flags & sshFileXferAttrSize) != 0 { - var size uint64 - if size, b, err = unmarshalUint64Safe(b); err == nil { - err = f.Truncate(int64(size)) - } - } - if (p.Flags & sshFileXferAttrPermissions) != 0 { - var mode uint32 - if mode, b, err = unmarshalUint32Safe(b); err == nil { - err = f.Chmod(os.FileMode(mode)) - } - } - if (p.Flags & sshFileXferAttrACmodTime) != 0 { - var atime uint32 - var mtime uint32 - if atime, b, err = unmarshalUint32Safe(b); err != nil { - } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { - } else { - atimeT := time.Unix(int64(atime), 0) - mtimeT := time.Unix(int64(mtime), 0) - err = os.Chtimes(f.Name(), atimeT, mtimeT) - } - } - if (p.Flags & sshFileXferAttrUIDGID) != 0 { - var uid uint32 - var gid uint32 - if uid, b, err = unmarshalUint32Safe(b); err != nil { - } else if gid, _, err = unmarshalUint32Safe(b); err != nil { - } else { - err = f.Chown(int(uid), int(gid)) - } - } - - return statusFromError(p.ID, err) -} - -func statusFromError(id uint32, err error) *sshFxpStatusPacket { - ret := &sshFxpStatusPacket{ - ID: id, - StatusError: StatusError{ - // sshFXOk = 0 - // sshFXEOF = 1 - // sshFXNoSuchFile = 2 ENOENT - // sshFXPermissionDenied = 3 - // sshFXFailure = 4 - // sshFXBadMessage = 5 - // sshFXNoConnection = 6 - // sshFXConnectionLost = 7 - // sshFXOPUnsupported = 8 - Code: sshFxOk, - }, - } - if err == nil { - return ret - } - - debug("statusFromError: error is %T %#v", err, err) - ret.StatusError.Code = sshFxFailure - 
ret.StatusError.msg = err.Error() - - if os.IsNotExist(err) { - ret.StatusError.Code = sshFxNoSuchFile - return ret - } - if code, ok := translateSyscallError(err); ok { - ret.StatusError.Code = code - return ret - } - - switch e := err.(type) { - case fxerr: - ret.StatusError.Code = uint32(e) - default: - if e == io.EOF { - ret.StatusError.Code = sshFxEOF - } - } - - return ret -} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go deleted file mode 100644 index 8c01dac5..00000000 --- a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go +++ /dev/null @@ -1,21 +0,0 @@ -package sftp - -import ( - "syscall" -) - -func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { - return &StatVFS{ - Bsize: uint64(stat.Bsize), - Frsize: uint64(stat.Bsize), // fragment size is a linux thing; use block size here - Blocks: stat.Blocks, - Bfree: stat.Bfree, - Bavail: stat.Bavail, - Files: stat.Files, - Ffree: stat.Ffree, - Favail: stat.Ffree, // not sure how to calculate Favail - Fsid: uint64(uint64(stat.Fsid.Val[1])<<32 | uint64(stat.Fsid.Val[0])), // endianness? - Flag: uint64(stat.Flags), // assuming POSIX? 
- Namemax: 1024, // man 2 statfs shows: #define MAXPATHLEN 1024 - }, nil -} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_impl.go b/vendor/github.com/pkg/sftp/server_statvfs_impl.go deleted file mode 100644 index 94b6d832..00000000 --- a/vendor/github.com/pkg/sftp/server_statvfs_impl.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build darwin linux - -// fill in statvfs structure with OS specific values -// Statfs_t is different per-kernel, and only exists on some unixes (not Solaris for instance) - -package sftp - -import ( - "syscall" -) - -func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket { - retPkt, err := getStatVFSForPath(p.Path) - if err != nil { - return statusFromError(p.ID, err) - } - retPkt.ID = p.ID - - return retPkt -} - -func getStatVFSForPath(name string) (*StatVFS, error) { - var stat syscall.Statfs_t - if err := syscall.Statfs(name, &stat); err != nil { - return nil, err - } - - return statvfsFromStatfst(&stat) -} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_linux.go b/vendor/github.com/pkg/sftp/server_statvfs_linux.go deleted file mode 100644 index 1d180d47..00000000 --- a/vendor/github.com/pkg/sftp/server_statvfs_linux.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux - -package sftp - -import ( - "syscall" -) - -func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { - return &StatVFS{ - Bsize: uint64(stat.Bsize), - Frsize: uint64(stat.Frsize), - Blocks: stat.Blocks, - Bfree: stat.Bfree, - Bavail: stat.Bavail, - Files: stat.Files, - Ffree: stat.Ffree, - Favail: stat.Ffree, // not sure how to calculate Favail - Flag: uint64(stat.Flags), // assuming POSIX? 
- Namemax: uint64(stat.Namelen), - }, nil -} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_plan9.go b/vendor/github.com/pkg/sftp/server_statvfs_plan9.go deleted file mode 100644 index e71a27d3..00000000 --- a/vendor/github.com/pkg/sftp/server_statvfs_plan9.go +++ /dev/null @@ -1,13 +0,0 @@ -package sftp - -import ( - "syscall" -) - -func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket { - return statusFromError(p.ID, syscall.EPLAN9) -} - -func getStatVFSForPath(name string) (*StatVFS, error) { - return nil, syscall.EPLAN9 -} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go deleted file mode 100644 index fbf49068..00000000 --- a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !darwin,!linux,!plan9 - -package sftp - -import ( - "syscall" -) - -func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket { - return statusFromError(p.ID, syscall.ENOTSUP) -} - -func getStatVFSForPath(name string) (*StatVFS, error) { - return nil, syscall.ENOTSUP -} diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go deleted file mode 100644 index 9a63c39d..00000000 --- a/vendor/github.com/pkg/sftp/sftp.go +++ /dev/null @@ -1,258 +0,0 @@ -// Package sftp implements the SSH File Transfer Protocol as described in -// https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 -package sftp - -import ( - "fmt" -) - -const ( - sshFxpInit = 1 - sshFxpVersion = 2 - sshFxpOpen = 3 - sshFxpClose = 4 - sshFxpRead = 5 - sshFxpWrite = 6 - sshFxpLstat = 7 - sshFxpFstat = 8 - sshFxpSetstat = 9 - sshFxpFsetstat = 10 - sshFxpOpendir = 11 - sshFxpReaddir = 12 - sshFxpRemove = 13 - sshFxpMkdir = 14 - sshFxpRmdir = 15 - sshFxpRealpath = 16 - sshFxpStat = 17 - sshFxpRename = 18 - sshFxpReadlink = 19 - sshFxpSymlink = 20 - sshFxpStatus = 101 - sshFxpHandle = 102 - sshFxpData = 103 - sshFxpName = 104 - sshFxpAttrs = 105 - 
sshFxpExtended = 200 - sshFxpExtendedReply = 201 -) - -const ( - sshFxOk = 0 - sshFxEOF = 1 - sshFxNoSuchFile = 2 - sshFxPermissionDenied = 3 - sshFxFailure = 4 - sshFxBadMessage = 5 - sshFxNoConnection = 6 - sshFxConnectionLost = 7 - sshFxOPUnsupported = 8 - - // see draft-ietf-secsh-filexfer-13 - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1 - sshFxInvalidHandle = 9 - sshFxNoSuchPath = 10 - sshFxFileAlreadyExists = 11 - sshFxWriteProtect = 12 - sshFxNoMedia = 13 - sshFxNoSpaceOnFilesystem = 14 - sshFxQuotaExceeded = 15 - sshFxUnknownPrincipal = 16 - sshFxLockConflict = 17 - sshFxDirNotEmpty = 18 - sshFxNotADirectory = 19 - sshFxInvalidFilename = 20 - sshFxLinkLoop = 21 - sshFxCannotDelete = 22 - sshFxInvalidParameter = 23 - sshFxFileIsADirectory = 24 - sshFxByteRangeLockConflict = 25 - sshFxByteRangeLockRefused = 26 - sshFxDeletePending = 27 - sshFxFileCorrupt = 28 - sshFxOwnerInvalid = 29 - sshFxGroupInvalid = 30 - sshFxNoMatchingByteRangeLock = 31 -) - -const ( - sshFxfRead = 0x00000001 - sshFxfWrite = 0x00000002 - sshFxfAppend = 0x00000004 - sshFxfCreat = 0x00000008 - sshFxfTrunc = 0x00000010 - sshFxfExcl = 0x00000020 -) - -var ( - // supportedSFTPExtensions defines the supported extensions - supportedSFTPExtensions = []sshExtensionPair{ - {"hardlink@openssh.com", "1"}, - {"posix-rename@openssh.com", "1"}, - {"statvfs@openssh.com", "2"}, - } - sftpExtensions = supportedSFTPExtensions -) - -type fxp uint8 - -func (f fxp) String() string { - switch f { - case sshFxpInit: - return "SSH_FXP_INIT" - case sshFxpVersion: - return "SSH_FXP_VERSION" - case sshFxpOpen: - return "SSH_FXP_OPEN" - case sshFxpClose: - return "SSH_FXP_CLOSE" - case sshFxpRead: - return "SSH_FXP_READ" - case sshFxpWrite: - return "SSH_FXP_WRITE" - case sshFxpLstat: - return "SSH_FXP_LSTAT" - case sshFxpFstat: - return "SSH_FXP_FSTAT" - case sshFxpSetstat: - return "SSH_FXP_SETSTAT" - case sshFxpFsetstat: - return "SSH_FXP_FSETSTAT" - case sshFxpOpendir: - return 
"SSH_FXP_OPENDIR" - case sshFxpReaddir: - return "SSH_FXP_READDIR" - case sshFxpRemove: - return "SSH_FXP_REMOVE" - case sshFxpMkdir: - return "SSH_FXP_MKDIR" - case sshFxpRmdir: - return "SSH_FXP_RMDIR" - case sshFxpRealpath: - return "SSH_FXP_REALPATH" - case sshFxpStat: - return "SSH_FXP_STAT" - case sshFxpRename: - return "SSH_FXP_RENAME" - case sshFxpReadlink: - return "SSH_FXP_READLINK" - case sshFxpSymlink: - return "SSH_FXP_SYMLINK" - case sshFxpStatus: - return "SSH_FXP_STATUS" - case sshFxpHandle: - return "SSH_FXP_HANDLE" - case sshFxpData: - return "SSH_FXP_DATA" - case sshFxpName: - return "SSH_FXP_NAME" - case sshFxpAttrs: - return "SSH_FXP_ATTRS" - case sshFxpExtended: - return "SSH_FXP_EXTENDED" - case sshFxpExtendedReply: - return "SSH_FXP_EXTENDED_REPLY" - default: - return "unknown" - } -} - -type fx uint8 - -func (f fx) String() string { - switch f { - case sshFxOk: - return "SSH_FX_OK" - case sshFxEOF: - return "SSH_FX_EOF" - case sshFxNoSuchFile: - return "SSH_FX_NO_SUCH_FILE" - case sshFxPermissionDenied: - return "SSH_FX_PERMISSION_DENIED" - case sshFxFailure: - return "SSH_FX_FAILURE" - case sshFxBadMessage: - return "SSH_FX_BAD_MESSAGE" - case sshFxNoConnection: - return "SSH_FX_NO_CONNECTION" - case sshFxConnectionLost: - return "SSH_FX_CONNECTION_LOST" - case sshFxOPUnsupported: - return "SSH_FX_OP_UNSUPPORTED" - default: - return "unknown" - } -} - -type unexpectedPacketErr struct { - want, got uint8 -} - -func (u *unexpectedPacketErr) Error() string { - return fmt.Sprintf("sftp: unexpected packet: want %v, got %v", fxp(u.want), fxp(u.got)) -} - -func unimplementedPacketErr(u uint8) error { - return fmt.Errorf("sftp: unimplemented packet type: got %v", fxp(u)) -} - -type unexpectedIDErr struct{ want, got uint32 } - -func (u *unexpectedIDErr) Error() string { - return fmt.Sprintf("sftp: unexpected id: want %d, got %d", u.want, u.got) -} - -func unimplementedSeekWhence(whence int) error { - return fmt.Errorf("sftp: unimplemented seek 
whence %d", whence) -} - -func unexpectedCount(want, got uint32) error { - return fmt.Errorf("sftp: unexpected count: want %d, got %d", want, got) -} - -type unexpectedVersionErr struct{ want, got uint32 } - -func (u *unexpectedVersionErr) Error() string { - return fmt.Sprintf("sftp: unexpected server version: want %v, got %v", u.want, u.got) -} - -// A StatusError is returned when an SFTP operation fails, and provides -// additional information about the failure. -type StatusError struct { - Code uint32 - msg, lang string -} - -func (s *StatusError) Error() string { - return fmt.Sprintf("sftp: %q (%v)", s.msg, fx(s.Code)) -} - -// FxCode returns the error code typed to match against the exported codes -func (s *StatusError) FxCode() fxerr { - return fxerr(s.Code) -} - -func getSupportedExtensionByName(extensionName string) (sshExtensionPair, error) { - for _, supportedExtension := range supportedSFTPExtensions { - if supportedExtension.Name == extensionName { - return supportedExtension, nil - } - } - return sshExtensionPair{}, fmt.Errorf("unsupported extension: %s", extensionName) -} - -// SetSFTPExtensions allows to customize the supported server extensions. -// See the variable supportedSFTPExtensions for supported extensions. -// This method accepts a slice of sshExtensionPair names for example 'hardlink@openssh.com'. 
-// If an invalid extension is given an error will be returned and nothing will be changed -func SetSFTPExtensions(extensions ...string) error { - tempExtensions := []sshExtensionPair{} - for _, extension := range extensions { - sftpExtension, err := getSupportedExtensionByName(extension) - if err != nil { - return err - } - tempExtensions = append(tempExtensions, sftpExtension) - } - sftpExtensions = tempExtensions - return nil -} diff --git a/vendor/github.com/pkg/sftp/stat_plan9.go b/vendor/github.com/pkg/sftp/stat_plan9.go deleted file mode 100644 index 761abdf5..00000000 --- a/vendor/github.com/pkg/sftp/stat_plan9.go +++ /dev/null @@ -1,103 +0,0 @@ -package sftp - -import ( - "os" - "syscall" -) - -var EBADF = syscall.NewError("fd out of range or not open") - -func wrapPathError(filepath string, err error) error { - if errno, ok := err.(syscall.ErrorString); ok { - return &os.PathError{Path: filepath, Err: errno} - } - return err -} - -// translateErrno translates a syscall error number to a SFTP error code. -func translateErrno(errno syscall.ErrorString) uint32 { - switch errno { - case "": - return sshFxOk - case syscall.ENOENT: - return sshFxNoSuchFile - case syscall.EPERM: - return sshFxPermissionDenied - } - - return sshFxFailure -} - -func translateSyscallError(err error) (uint32, bool) { - switch e := err.(type) { - case syscall.ErrorString: - return translateErrno(e), true - case *os.PathError: - debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err) - if errno, ok := e.Err.(syscall.ErrorString); ok { - return translateErrno(errno), true - } - } - return 0, false -} - -// isRegular returns true if the mode describes a regular file. 
-func isRegular(mode uint32) bool { - return mode&S_IFMT == syscall.S_IFREG -} - -// toFileMode converts sftp filemode bits to the os.FileMode specification -func toFileMode(mode uint32) os.FileMode { - var fm = os.FileMode(mode & 0777) - - switch mode & S_IFMT { - case syscall.S_IFBLK: - fm |= os.ModeDevice - case syscall.S_IFCHR: - fm |= os.ModeDevice | os.ModeCharDevice - case syscall.S_IFDIR: - fm |= os.ModeDir - case syscall.S_IFIFO: - fm |= os.ModeNamedPipe - case syscall.S_IFLNK: - fm |= os.ModeSymlink - case syscall.S_IFREG: - // nothing to do - case syscall.S_IFSOCK: - fm |= os.ModeSocket - } - - return fm -} - -// fromFileMode converts from the os.FileMode specification to sftp filemode bits -func fromFileMode(mode os.FileMode) uint32 { - ret := uint32(mode & os.ModePerm) - - switch mode & os.ModeType { - case os.ModeDevice | os.ModeCharDevice: - ret |= syscall.S_IFCHR - case os.ModeDevice: - ret |= syscall.S_IFBLK - case os.ModeDir: - ret |= syscall.S_IFDIR - case os.ModeNamedPipe: - ret |= syscall.S_IFIFO - case os.ModeSymlink: - ret |= syscall.S_IFLNK - case 0: - ret |= syscall.S_IFREG - case os.ModeSocket: - ret |= syscall.S_IFSOCK - } - - return ret -} - -// Plan 9 doesn't have setuid, setgid or sticky, but a Plan 9 client should -// be able to send these bits to a POSIX server. -const ( - s_ISUID = 04000 - s_ISGID = 02000 - s_ISVTX = 01000 -) diff --git a/vendor/github.com/pkg/sftp/stat_posix.go b/vendor/github.com/pkg/sftp/stat_posix.go deleted file mode 100644 index 5b870e23..00000000 --- a/vendor/github.com/pkg/sftp/stat_posix.go +++ /dev/null @@ -1,124 +0,0 @@ -//go:build !plan9 -// +build !plan9 - -package sftp - -import ( - "os" - "syscall" -) - -const EBADF = syscall.EBADF - -func wrapPathError(filepath string, err error) error { - if errno, ok := err.(syscall.Errno); ok { - return &os.PathError{Path: filepath, Err: errno} - } - return err -} - -// translateErrno translates a syscall error number to a SFTP error code. 
-func translateErrno(errno syscall.Errno) uint32 { - switch errno { - case 0: - return sshFxOk - case syscall.ENOENT: - return sshFxNoSuchFile - case syscall.EACCES, syscall.EPERM: - return sshFxPermissionDenied - } - - return sshFxFailure -} - -func translateSyscallError(err error) (uint32, bool) { - switch e := err.(type) { - case syscall.Errno: - return translateErrno(e), true - case *os.PathError: - debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err) - if errno, ok := e.Err.(syscall.Errno); ok { - return translateErrno(errno), true - } - } - return 0, false -} - -// isRegular returns true if the mode describes a regular file. -func isRegular(mode uint32) bool { - return mode&S_IFMT == syscall.S_IFREG -} - -// toFileMode converts sftp filemode bits to the os.FileMode specification -func toFileMode(mode uint32) os.FileMode { - var fm = os.FileMode(mode & 0777) - - switch mode & S_IFMT { - case syscall.S_IFBLK: - fm |= os.ModeDevice - case syscall.S_IFCHR: - fm |= os.ModeDevice | os.ModeCharDevice - case syscall.S_IFDIR: - fm |= os.ModeDir - case syscall.S_IFIFO: - fm |= os.ModeNamedPipe - case syscall.S_IFLNK: - fm |= os.ModeSymlink - case syscall.S_IFREG: - // nothing to do - case syscall.S_IFSOCK: - fm |= os.ModeSocket - } - - if mode&syscall.S_ISUID != 0 { - fm |= os.ModeSetuid - } - if mode&syscall.S_ISGID != 0 { - fm |= os.ModeSetgid - } - if mode&syscall.S_ISVTX != 0 { - fm |= os.ModeSticky - } - - return fm -} - -// fromFileMode converts from the os.FileMode specification to sftp filemode bits -func fromFileMode(mode os.FileMode) uint32 { - ret := uint32(mode & os.ModePerm) - - switch mode & os.ModeType { - case os.ModeDevice | os.ModeCharDevice: - ret |= syscall.S_IFCHR - case os.ModeDevice: - ret |= syscall.S_IFBLK - case os.ModeDir: - ret |= syscall.S_IFDIR - case os.ModeNamedPipe: - ret |= syscall.S_IFIFO - case os.ModeSymlink: - ret |= syscall.S_IFLNK - case 0: - ret |= syscall.S_IFREG - case os.ModeSocket: - ret |= syscall.S_IFSOCK - } 
- - if mode&os.ModeSetuid != 0 { - ret |= syscall.S_ISUID - } - if mode&os.ModeSetgid != 0 { - ret |= syscall.S_ISGID - } - if mode&os.ModeSticky != 0 { - ret |= syscall.S_ISVTX - } - - return ret -} - -const ( - s_ISUID = syscall.S_ISUID - s_ISGID = syscall.S_ISGID - s_ISVTX = syscall.S_ISVTX -) diff --git a/vendor/github.com/pkg/sftp/syscall_fixed.go b/vendor/github.com/pkg/sftp/syscall_fixed.go deleted file mode 100644 index d4045777..00000000 --- a/vendor/github.com/pkg/sftp/syscall_fixed.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build plan9 windows js,wasm - -// Go defines S_IFMT on windows, plan9 and js/wasm as 0x1f000 instead of -// 0xf000. None of the the other S_IFxyz values include the "1" (in 0x1f000) -// which prevents them from matching the bitmask. - -package sftp - -const S_IFMT = 0xf000 diff --git a/vendor/github.com/pkg/sftp/syscall_good.go b/vendor/github.com/pkg/sftp/syscall_good.go deleted file mode 100644 index 4c2b240c..00000000 --- a/vendor/github.com/pkg/sftp/syscall_good.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !plan9,!windows -// +build !js !wasm - -package sftp - -import "syscall" - -const S_IFMT = syscall.S_IFMT diff --git a/vendor/github.com/pkg/xattr/.gitignore b/vendor/github.com/pkg/xattr/.gitignore deleted file mode 100644 index d8b32652..00000000 --- a/vendor/github.com/pkg/xattr/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -.DS_Store - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test - -*.swp diff --git a/vendor/github.com/pkg/xattr/LICENSE b/vendor/github.com/pkg/xattr/LICENSE deleted file mode 100644 index 99d2e9dc..00000000 --- a/vendor/github.com/pkg/xattr/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2012 Dave Cheney. All rights reserved. -Copyright (c) 2014 Kuba Podgórski. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/pkg/xattr/README.md b/vendor/github.com/pkg/xattr/README.md deleted file mode 100644 index 0662c020..00000000 --- a/vendor/github.com/pkg/xattr/README.md +++ /dev/null @@ -1,45 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/pkg/xattr?status.svg)](http://godoc.org/github.com/pkg/xattr) -[![Go Report Card](https://goreportcard.com/badge/github.com/pkg/xattr)](https://goreportcard.com/report/github.com/pkg/xattr) -[![Build Status](https://github.com/pkg/xattr/workflows/build/badge.svg)](https://github.com/pkg/xattr/actions?query=workflow%3Abuild) -[![Codecov](https://codecov.io/gh/pkg/xattr/branch/master/graph/badge.svg)](https://codecov.io/gh/pkg/xattr) - -xattr -===== -Extended attribute support for Go (linux + darwin + freebsd + netbsd + solaris). - -"Extended attributes are name:value pairs associated permanently with files and directories, similar to the environment strings associated with a process. An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty." [See more...](https://en.wikipedia.org/wiki/Extended_file_attributes) - -`SetWithFlags` allows to additionally pass system flags to be forwarded to the underlying calls. FreeBSD and NetBSD do not support this and the parameter will be ignored. - -The `L` variants of all functions (`LGet/LSet/...`) are identical to `Get/Set/...` except that they -do not reference a symlink that appears at the end of a path. See -[GoDoc](http://godoc.org/github.com/pkg/xattr) for details. - -### Example -```go - const path = "/tmp/myfile" - const prefix = "user." 
- - if err := xattr.Set(path, prefix+"test", []byte("test-attr-value")); err != nil { - log.Fatal(err) - } - - var list []string - if list, err = xattr.List(path); err != nil { - log.Fatal(err) - } - - var data []byte - if data, err = xattr.Get(path, prefix+"test"); err != nil { - log.Fatal(err) - } - - if err = xattr.Remove(path, prefix+"test"); err != nil { - log.Fatal(err) - } - - // One can also specify the flags parameter to be passed to the OS. - if err := xattr.SetWithFlags(path, prefix+"test", []byte("test-attr-value"), xattr.XATTR_CREATE); err != nil { - log.Fatal(err) - } -``` diff --git a/vendor/github.com/pkg/xattr/xattr.go b/vendor/github.com/pkg/xattr/xattr.go deleted file mode 100644 index 8b2b5fe8..00000000 --- a/vendor/github.com/pkg/xattr/xattr.go +++ /dev/null @@ -1,257 +0,0 @@ -/* -Package xattr provides support for extended attributes on linux, darwin and freebsd. -Extended attributes are name:value pairs associated permanently with files and directories, -similar to the environment strings associated with a process. -An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty. -More details you can find here: https://en.wikipedia.org/wiki/Extended_file_attributes . - -All functions are provided in triples: Get/LGet/FGet, Set/LSet/FSet etc. The "L" -variant will not follow a symlink at the end of the path, and "F" variant accepts -a file descriptor instead of a path. - -Example for "L" variant, assuming path is "/symlink1/symlink2", where both components are -symlinks: -Get will follow "symlink1" and "symlink2" and operate on the target of -"symlink2". LGet will follow "symlink1" but operate directly on "symlink2". -*/ -package xattr - -import ( - "os" - "syscall" -) - -// Error records an error and the operation, file path and attribute that caused it. 
-type Error struct { - Op string - Path string - Name string - Err error -} - -func (e *Error) Unwrap() error { return e.Err } - -func (e *Error) Error() (errstr string) { - if e.Op != "" { - errstr += e.Op - } - if e.Path != "" { - if errstr != "" { - errstr += " " - } - errstr += e.Path - } - if e.Name != "" { - if errstr != "" { - errstr += " " - } - errstr += e.Name - } - if e.Err != nil { - if errstr != "" { - errstr += ": " - } - errstr += e.Err.Error() - } - return -} - -// Get retrieves extended attribute data associated with path. It will follow -// all symlinks along the path. -func Get(path, name string) ([]byte, error) { - return get(path, name, func(name string, data []byte) (int, error) { - return getxattr(path, name, data) - }) -} - -// LGet is like Get but does not follow a symlink at the end of the path. -func LGet(path, name string) ([]byte, error) { - return get(path, name, func(name string, data []byte) (int, error) { - return lgetxattr(path, name, data) - }) -} - -// FGet is like Get but accepts a os.File instead of a file path. -func FGet(f *os.File, name string) ([]byte, error) { - return get(f.Name(), name, func(name string, data []byte) (int, error) { - return fgetxattr(f, name, data) - }) -} - -type getxattrFunc func(name string, data []byte) (int, error) - -// get contains the buffer allocation logic used by both Get and LGet. -func get(path string, name string, getxattrFunc getxattrFunc) ([]byte, error) { - const ( - // Start with a 1 KB buffer for the xattr value - initialBufSize = 1024 - - // The theoretical maximum xattr value size on MacOS is 64 MB. On Linux it's - // much smaller at 64 KB. Unless the kernel is evil or buggy, we should never - // hit the limit. 
- maxBufSize = 64 * 1024 * 1024 - - // Function name as reported in error messages - myname = "xattr.get" - ) - - size := initialBufSize - for { - data := make([]byte, size) - read, err := getxattrFunc(name, data) - - // If the buffer was too small to fit the value, Linux and MacOS react - // differently: - // Linux: returns an ERANGE error and "-1" bytes. - // MacOS: truncates the value and returns "size" bytes. If the value - // happens to be exactly as big as the buffer, we cannot know if it was - // truncated, and we retry with a bigger buffer. Contrary to documentation, - // MacOS never seems to return ERANGE! - // To keep the code simple, we always check both conditions, and sometimes - // double the buffer size without it being strictly necessary. - if err == syscall.ERANGE || read == size { - // The buffer was too small. Try again. - size <<= 1 - if size >= maxBufSize { - return nil, &Error{myname, path, name, syscall.EOVERFLOW} - } - continue - } - if err != nil { - return nil, &Error{myname, path, name, err} - } - return data[:read], nil - } -} - -// Set associates name and data together as an attribute of path. -func Set(path, name string, data []byte) error { - if err := setxattr(path, name, data, 0); err != nil { - return &Error{"xattr.Set", path, name, err} - } - return nil -} - -// LSet is like Set but does not follow a symlink at -// the end of the path. -func LSet(path, name string, data []byte) error { - if err := lsetxattr(path, name, data, 0); err != nil { - return &Error{"xattr.LSet", path, name, err} - } - return nil -} - -// FSet is like Set but accepts a os.File instead of a file path. -func FSet(f *os.File, name string, data []byte) error { - if err := fsetxattr(f, name, data, 0); err != nil { - return &Error{"xattr.FSet", f.Name(), name, err} - } - return nil -} - -// SetWithFlags associates name and data together as an attribute of path. -// Forwards the flags parameter to the syscall layer. 
-func SetWithFlags(path, name string, data []byte, flags int) error { - if err := setxattr(path, name, data, flags); err != nil { - return &Error{"xattr.SetWithFlags", path, name, err} - } - return nil -} - -// LSetWithFlags is like SetWithFlags but does not follow a symlink at -// the end of the path. -func LSetWithFlags(path, name string, data []byte, flags int) error { - if err := lsetxattr(path, name, data, flags); err != nil { - return &Error{"xattr.LSetWithFlags", path, name, err} - } - return nil -} - -// FSetWithFlags is like SetWithFlags but accepts a os.File instead of a file path. -func FSetWithFlags(f *os.File, name string, data []byte, flags int) error { - if err := fsetxattr(f, name, data, flags); err != nil { - return &Error{"xattr.FSetWithFlags", f.Name(), name, err} - } - return nil -} - -// Remove removes the attribute associated with the given path. -func Remove(path, name string) error { - if err := removexattr(path, name); err != nil { - return &Error{"xattr.Remove", path, name, err} - } - return nil -} - -// LRemove is like Remove but does not follow a symlink at the end of the -// path. -func LRemove(path, name string) error { - if err := lremovexattr(path, name); err != nil { - return &Error{"xattr.LRemove", path, name, err} - } - return nil -} - -// FRemove is like Remove but accepts a os.File instead of a file path. -func FRemove(f *os.File, name string) error { - if err := fremovexattr(f, name); err != nil { - return &Error{"xattr.FRemove", f.Name(), name, err} - } - return nil -} - -// List retrieves a list of names of extended attributes associated -// with the given path in the file system. -func List(path string) ([]string, error) { - return list(path, func(data []byte) (int, error) { - return listxattr(path, data) - }) -} - -// LList is like List but does not follow a symlink at the end of the -// path. 
-func LList(path string) ([]string, error) { - return list(path, func(data []byte) (int, error) { - return llistxattr(path, data) - }) -} - -// FList is like List but accepts a os.File instead of a file path. -func FList(f *os.File) ([]string, error) { - return list(f.Name(), func(data []byte) (int, error) { - return flistxattr(f, data) - }) -} - -type listxattrFunc func(data []byte) (int, error) - -// list contains the buffer allocation logic used by both List and LList. -func list(path string, listxattrFunc listxattrFunc) ([]string, error) { - myname := "xattr.list" - // find size. - size, err := listxattrFunc(nil) - if err != nil { - return nil, &Error{myname, path, "", err} - } - if size > 0 { - // `size + 1` because of ERANGE error when reading - // from a SMB1 mount point (https://github.com/pkg/xattr/issues/16). - buf := make([]byte, size+1) - // Read into buffer of that size. - read, err := listxattrFunc(buf) - if err != nil { - return nil, &Error{myname, path, "", err} - } - return stringsFromByteSlice(buf[:read]), nil - } - return []string{}, nil -} - -// bytePtrFromSlice returns a pointer to array of bytes and a size. -func bytePtrFromSlice(data []byte) (ptr *byte, size int) { - size = len(data) - if size > 0 { - ptr = &data[0] - } - return -} diff --git a/vendor/github.com/pkg/xattr/xattr_bsd.go b/vendor/github.com/pkg/xattr/xattr_bsd.go deleted file mode 100644 index f4a3f953..00000000 --- a/vendor/github.com/pkg/xattr/xattr_bsd.go +++ /dev/null @@ -1,201 +0,0 @@ -//go:build freebsd || netbsd -// +build freebsd netbsd - -package xattr - -import ( - "os" - "syscall" - "unsafe" -) - -const ( - // XATTR_SUPPORTED will be true if the current platform is supported - XATTR_SUPPORTED = true - - EXTATTR_NAMESPACE_USER = 1 - - // ENOATTR is not exported by the syscall package on Linux, because it is - // an alias for ENODATA. We export it here so it is available on all - // our supported platforms. 
- ENOATTR = syscall.ENOATTR -) - -func getxattr(path string, name string, data []byte) (int, error) { - return sysGet(syscall.SYS_EXTATTR_GET_FILE, path, name, data) -} - -func lgetxattr(path string, name string, data []byte) (int, error) { - return sysGet(syscall.SYS_EXTATTR_GET_LINK, path, name, data) -} - -func fgetxattr(f *os.File, name string, data []byte) (int, error) { - return getxattr(f.Name(), name, data) -} - -// sysGet is called by getxattr and lgetxattr with the appropriate syscall -// number. This works because syscalls have the same signature and return -// values. -func sysGet(syscallNum uintptr, path string, name string, data []byte) (int, error) { - ptr, nbytes := bytePtrFromSlice(data) - /* - ssize_t extattr_get_file( - const char *path, - int attrnamespace, - const char *attrname, - void *data, - size_t nbytes); - - ssize_t extattr_get_link( - const char *path, - int attrnamespace, - const char *attrname, - void *data, - size_t nbytes); - */ - r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), - EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), - uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) - if err != syscall.Errno(0) { - return int(r0), err - } - return int(r0), nil -} - -func setxattr(path string, name string, data []byte, flags int) error { - return sysSet(syscall.SYS_EXTATTR_SET_FILE, path, name, data) -} - -func lsetxattr(path string, name string, data []byte, flags int) error { - return sysSet(syscall.SYS_EXTATTR_SET_LINK, path, name, data) -} - -func fsetxattr(f *os.File, name string, data []byte, flags int) error { - return setxattr(f.Name(), name, data, flags) -} - -// sysSet is called by setxattr and lsetxattr with the appropriate syscall -// number. This works because syscalls have the same signature and return -// values. 
-func sysSet(syscallNum uintptr, path string, name string, data []byte) error { - ptr, nbytes := bytePtrFromSlice(data) - /* - ssize_t extattr_set_file( - const char *path, - int attrnamespace, - const char *attrname, - const void *data, - size_t nbytes - ); - - ssize_t extattr_set_link( - const char *path, - int attrnamespace, - const char *attrname, - const void *data, - size_t nbytes - ); - */ - r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), - EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), - uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) - if err != syscall.Errno(0) { - return err - } - if int(r0) != nbytes { - return syscall.E2BIG - } - return nil -} - -func removexattr(path string, name string) error { - return sysRemove(syscall.SYS_EXTATTR_DELETE_FILE, path, name) -} - -func lremovexattr(path string, name string) error { - return sysRemove(syscall.SYS_EXTATTR_DELETE_LINK, path, name) -} - -func fremovexattr(f *os.File, name string) error { - return removexattr(f.Name(), name) -} - -// sysSet is called by removexattr and lremovexattr with the appropriate syscall -// number. This works because syscalls have the same signature and return -// values. 
-func sysRemove(syscallNum uintptr, path string, name string) error { - /* - int extattr_delete_file( - const char *path, - int attrnamespace, - const char *attrname - ); - - int extattr_delete_link( - const char *path, - int attrnamespace, - const char *attrname - ); - */ - _, _, err := syscall.Syscall(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), - EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), - ) - if err != syscall.Errno(0) { - return err - } - return nil -} - -func listxattr(path string, data []byte) (int, error) { - return sysList(syscall.SYS_EXTATTR_LIST_FILE, path, data) -} - -func llistxattr(path string, data []byte) (int, error) { - return sysList(syscall.SYS_EXTATTR_LIST_LINK, path, data) -} - -func flistxattr(f *os.File, data []byte) (int, error) { - return listxattr(f.Name(), data) -} - -// sysSet is called by listxattr and llistxattr with the appropriate syscall -// number. This works because syscalls have the same signature and return -// values. -func sysList(syscallNum uintptr, path string, data []byte) (int, error) { - ptr, nbytes := bytePtrFromSlice(data) - /* - ssize_t extattr_list_file( - const char *path, - int attrnamespace, - void *data, - size_t nbytes - ); - - ssize_t extattr_list_link( - const char *path, - int attrnamespace, - void *data, - size_t nbytes - ); - */ - r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), - EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0, 0) - if err != syscall.Errno(0) { - return int(r0), err - } - return int(r0), nil -} - -// stringsFromByteSlice converts a sequence of attributes to a []string. -// On FreeBSD, each entry consists of a single byte containing the length -// of the attribute name, followed by the attribute name. -// The name is _not_ terminated by NULL. 
-func stringsFromByteSlice(buf []byte) (result []string) { - index := 0 - for index < len(buf) { - next := index + 1 + int(buf[index]) - result = append(result, string(buf[index+1:next])) - index = next - } - return -} diff --git a/vendor/github.com/pkg/xattr/xattr_darwin.go b/vendor/github.com/pkg/xattr/xattr_darwin.go deleted file mode 100644 index ee7a501d..00000000 --- a/vendor/github.com/pkg/xattr/xattr_darwin.go +++ /dev/null @@ -1,90 +0,0 @@ -//go:build darwin -// +build darwin - -package xattr - -import ( - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -// See https://opensource.apple.com/source/xnu/xnu-1504.15.3/bsd/sys/xattr.h.auto.html -const ( - // XATTR_SUPPORTED will be true if the current platform is supported - XATTR_SUPPORTED = true - - XATTR_NOFOLLOW = 0x0001 - XATTR_CREATE = 0x0002 - XATTR_REPLACE = 0x0004 - XATTR_NOSECURITY = 0x0008 - XATTR_NODEFAULT = 0x0010 - XATTR_SHOWCOMPRESSION = 0x0020 - - // ENOATTR is not exported by the syscall package on Linux, because it is - // an alias for ENODATA. We export it here so it is available on all - // our supported platforms. 
- ENOATTR = syscall.ENOATTR -) - -func getxattr(path string, name string, data []byte) (int, error) { - return unix.Getxattr(path, name, data) -} - -func lgetxattr(path string, name string, data []byte) (int, error) { - return unix.Lgetxattr(path, name, data) -} - -func fgetxattr(f *os.File, name string, data []byte) (int, error) { - return getxattr(f.Name(), name, data) -} - -func setxattr(path string, name string, data []byte, flags int) error { - return unix.Setxattr(path, name, data, flags) -} - -func lsetxattr(path string, name string, data []byte, flags int) error { - return unix.Lsetxattr(path, name, data, flags) -} - -func fsetxattr(f *os.File, name string, data []byte, flags int) error { - return setxattr(f.Name(), name, data, flags) -} - -func removexattr(path string, name string) error { - return unix.Removexattr(path, name) -} - -func lremovexattr(path string, name string) error { - return unix.Lremovexattr(path, name) -} - -func fremovexattr(f *os.File, name string) error { - return removexattr(f.Name(), name) -} - -func listxattr(path string, data []byte) (int, error) { - return unix.Listxattr(path, data) -} - -func llistxattr(path string, data []byte) (int, error) { - return unix.Llistxattr(path, data) -} - -func flistxattr(f *os.File, data []byte) (int, error) { - return listxattr(f.Name(), data) -} - -// stringsFromByteSlice converts a sequence of attributes to a []string. -// On Darwin and Linux, each entry is a NULL-terminated string. 
-func stringsFromByteSlice(buf []byte) (result []string) { - offset := 0 - for index, b := range buf { - if b == 0 { - result = append(result, string(buf[offset:index])) - offset = index + 1 - } - } - return -} diff --git a/vendor/github.com/pkg/xattr/xattr_linux.go b/vendor/github.com/pkg/xattr/xattr_linux.go deleted file mode 100644 index 879085ee..00000000 --- a/vendor/github.com/pkg/xattr/xattr_linux.go +++ /dev/null @@ -1,142 +0,0 @@ -//go:build linux -// +build linux - -package xattr - -import ( - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -const ( - // XATTR_SUPPORTED will be true if the current platform is supported - XATTR_SUPPORTED = true - - XATTR_CREATE = unix.XATTR_CREATE - XATTR_REPLACE = unix.XATTR_REPLACE - - // ENOATTR is not exported by the syscall package on Linux, because it is - // an alias for ENODATA. We export it here so it is available on all - // our supported platforms. - ENOATTR = syscall.ENODATA -) - -// On Linux, FUSE and CIFS filesystems can return EINTR for interrupted system -// calls. This function works around this by retrying system calls until they -// stop returning EINTR. -// -// See https://github.com/golang/go/commit/6b420169d798c7ebe733487b56ea5c3fa4aab5ce. 
-func ignoringEINTR(fn func() error) (err error) { - for { - err = fn() - if err != unix.EINTR { - break - } - } - return err -} - -func getxattr(path string, name string, data []byte) (int, error) { - var r int - err := ignoringEINTR(func() (err error) { - r, err = unix.Getxattr(path, name, data) - return err - }) - return r, err -} - -func lgetxattr(path string, name string, data []byte) (int, error) { - var r int - err := ignoringEINTR(func() (err error) { - r, err = unix.Lgetxattr(path, name, data) - return err - }) - return r, err -} - -func fgetxattr(f *os.File, name string, data []byte) (int, error) { - var r int - err := ignoringEINTR(func() (err error) { - r, err = unix.Fgetxattr(int(f.Fd()), name, data) - return err - }) - return r, err -} - -func setxattr(path string, name string, data []byte, flags int) error { - return ignoringEINTR(func() (err error) { - return unix.Setxattr(path, name, data, flags) - }) -} - -func lsetxattr(path string, name string, data []byte, flags int) error { - return ignoringEINTR(func() (err error) { - return unix.Lsetxattr(path, name, data, flags) - }) -} - -func fsetxattr(f *os.File, name string, data []byte, flags int) error { - return ignoringEINTR(func() (err error) { - return unix.Fsetxattr(int(f.Fd()), name, data, flags) - }) -} - -func removexattr(path string, name string) error { - return ignoringEINTR(func() (err error) { - return unix.Removexattr(path, name) - }) -} - -func lremovexattr(path string, name string) error { - return ignoringEINTR(func() (err error) { - return unix.Lremovexattr(path, name) - }) -} - -func fremovexattr(f *os.File, name string) error { - return ignoringEINTR(func() (err error) { - return unix.Fremovexattr(int(f.Fd()), name) - }) -} - -func listxattr(path string, data []byte) (int, error) { - var r int - err := ignoringEINTR(func() (err error) { - r, err = unix.Listxattr(path, data) - return err - }) - return r, err -} - -func llistxattr(path string, data []byte) (int, error) { - var r int 
- err := ignoringEINTR(func() (err error) { - r, err = unix.Llistxattr(path, data) - return err - }) - return r, err -} - -func flistxattr(f *os.File, data []byte) (int, error) { - var r int - err := ignoringEINTR(func() (err error) { - r, err = unix.Flistxattr(int(f.Fd()), data) - return err - }) - return r, err -} - -// stringsFromByteSlice converts a sequence of attributes to a []string. -// On Darwin and Linux, each entry is a NULL-terminated string. -func stringsFromByteSlice(buf []byte) (result []string) { - offset := 0 - for index, b := range buf { - if b == 0 { - result = append(result, string(buf[offset:index])) - offset = index + 1 - } - } - return -} diff --git a/vendor/github.com/pkg/xattr/xattr_solaris.go b/vendor/github.com/pkg/xattr/xattr_solaris.go deleted file mode 100644 index 8d65b8d8..00000000 --- a/vendor/github.com/pkg/xattr/xattr_solaris.go +++ /dev/null @@ -1,165 +0,0 @@ -//go:build solaris -// +build solaris - -package xattr - -import ( - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -const ( - // XATTR_SUPPORTED will be true if the current platform is supported - XATTR_SUPPORTED = true - - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - - // ENOATTR is not exported by the syscall package on Linux, because it is - // an alias for ENODATA. We export it here so it is available on all - // our supported platforms. 
- ENOATTR = syscall.ENODATA -) - -func getxattr(path string, name string, data []byte) (int, error) { - f, err := os.OpenFile(path, os.O_RDONLY, 0) - if err != nil { - return 0, err - } - defer func() { - _ = f.Close() - }() - return fgetxattr(f, name, data) -} - -func lgetxattr(path string, name string, data []byte) (int, error) { - return 0, unix.ENOTSUP -} - -func fgetxattr(f *os.File, name string, data []byte) (int, error) { - fd, err := unix.Openat(int(f.Fd()), name, unix.O_RDONLY|unix.O_XATTR, 0) - if err != nil { - return 0, err - } - defer func() { - _ = unix.Close(fd) - }() - return unix.Read(fd, data) -} - -func setxattr(path string, name string, data []byte, flags int) error { - f, err := os.OpenFile(path, os.O_RDONLY, 0) - if err != nil { - return err - } - err = fsetxattr(f, name, data, flags) - if err != nil { - _ = f.Close() - return err - } - return f.Close() -} - -func lsetxattr(path string, name string, data []byte, flags int) error { - return unix.ENOTSUP -} - -func fsetxattr(f *os.File, name string, data []byte, flags int) error { - mode := unix.O_WRONLY | unix.O_XATTR - if flags&XATTR_REPLACE != 0 { - mode |= unix.O_TRUNC - } else if flags&XATTR_CREATE != 0 { - mode |= unix.O_CREAT | unix.O_EXCL - } else { - mode |= unix.O_CREAT | unix.O_TRUNC - } - fd, err := unix.Openat(int(f.Fd()), name, mode, 0666) - if err != nil { - return err - } - if _, err = unix.Write(fd, data); err != nil { - _ = unix.Close(fd) - return err - } - return unix.Close(fd) -} - -func removexattr(path string, name string) error { - fd, err := unix.Open(path, unix.O_RDONLY|unix.O_XATTR, 0) - if err != nil { - return err - } - f := os.NewFile(uintptr(fd), path) - defer func() { - _ = f.Close() - }() - return fremovexattr(f, name) -} - -func lremovexattr(path string, name string) error { - return unix.ENOTSUP -} - -func fremovexattr(f *os.File, name string) error { - fd, err := unix.Openat(int(f.Fd()), ".", unix.O_XATTR, 0) - if err != nil { - return err - } - defer func() { 
- _ = unix.Close(fd) - }() - return unix.Unlinkat(fd, name, 0) -} - -func listxattr(path string, data []byte) (int, error) { - f, err := os.OpenFile(path, os.O_RDONLY, 0) - if err != nil { - return 0, err - } - defer func() { - _ = f.Close() - }() - return flistxattr(f, data) -} - -func llistxattr(path string, data []byte) (int, error) { - return 0, unix.ENOTSUP -} - -func flistxattr(f *os.File, data []byte) (int, error) { - fd, err := unix.Openat(int(f.Fd()), ".", unix.O_RDONLY|unix.O_XATTR, 0) - if err != nil { - return 0, unix.ENOTSUP - } - xf := os.NewFile(uintptr(fd), f.Name()) - defer func() { - _ = xf.Close() - }() - names, err := xf.Readdirnames(-1) - if err != nil { - return 0, err - } - var buf []byte - for _, name := range names { - buf = append(buf, append([]byte(name), '\000')...) - } - if data == nil { - return len(buf), nil - } - return copy(data, buf), nil -} - -// stringsFromByteSlice converts a sequence of attributes to a []string. -// On Darwin and Linux, each entry is a NULL-terminated string. 
-func stringsFromByteSlice(buf []byte) (result []string) { - offset := 0 - for index, b := range buf { - if b == 0 { - result = append(result, string(buf[offset:index])) - offset = index + 1 - } - } - return -} diff --git a/vendor/github.com/pkg/xattr/xattr_unsupported.go b/vendor/github.com/pkg/xattr/xattr_unsupported.go deleted file mode 100644 index 8886fbdc..00000000 --- a/vendor/github.com/pkg/xattr/xattr_unsupported.go +++ /dev/null @@ -1,70 +0,0 @@ -//go:build !linux && !freebsd && !netbsd && !darwin && !solaris -// +build !linux,!freebsd,!netbsd,!darwin,!solaris - -package xattr - -import ( - "os" - "syscall" -) - -const ( - // We need to use the default for non supported operating systems - ENOATTR = syscall.Errno(0x59) -) - -// XATTR_SUPPORTED will be true if the current platform is supported -const XATTR_SUPPORTED = false - -func getxattr(path string, name string, data []byte) (int, error) { - return 0, nil -} - -func lgetxattr(path string, name string, data []byte) (int, error) { - return 0, nil -} - -func fgetxattr(f *os.File, name string, data []byte) (int, error) { - return 0, nil -} - -func setxattr(path string, name string, data []byte, flags int) error { - return nil -} - -func lsetxattr(path string, name string, data []byte, flags int) error { - return nil -} - -func fsetxattr(f *os.File, name string, data []byte, flags int) error { - return nil -} - -func removexattr(path string, name string) error { - return nil -} - -func lremovexattr(path string, name string) error { - return nil -} - -func fremovexattr(f *os.File, name string) error { - return nil -} - -func listxattr(path string, data []byte) (int, error) { - return 0, nil -} - -func llistxattr(path string, data []byte) (int, error) { - return 0, nil -} - -func flistxattr(f *os.File, data []byte) (int, error) { - return 0, nil -} - -// dummy -func stringsFromByteSlice(buf []byte) (result []string) { - return []string{} -} diff --git a/vendor/github.com/robfig/cron/v3/.gitignore 
b/vendor/github.com/robfig/cron/v3/.gitignore deleted file mode 100644 index 00268614..00000000 --- a/vendor/github.com/robfig/cron/v3/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/robfig/cron/v3/.travis.yml b/vendor/github.com/robfig/cron/v3/.travis.yml deleted file mode 100644 index 4f2ee4d9..00000000 --- a/vendor/github.com/robfig/cron/v3/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/robfig/cron/v3/LICENSE b/vendor/github.com/robfig/cron/v3/LICENSE deleted file mode 100644 index 3a0f627f..00000000 --- a/vendor/github.com/robfig/cron/v3/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (C) 2012 Rob Figueiredo -All Rights Reserved. - -MIT LICENSE - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/robfig/cron/v3/README.md b/vendor/github.com/robfig/cron/v3/README.md deleted file mode 100644 index 984c537c..00000000 --- a/vendor/github.com/robfig/cron/v3/README.md +++ /dev/null @@ -1,125 +0,0 @@ -[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron) -[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron) - -# cron - -Cron V3 has been released! - -To download the specific tagged release, run: - - go get github.com/robfig/cron/v3@v3.0.0 - -Import it in your program as: - - import "github.com/robfig/cron/v3" - -It requires Go 1.11 or later due to usage of Go Modules. - -Refer to the documentation here: -http://godoc.org/github.com/robfig/cron - -The rest of this document describes the the advances in v3 and a list of -breaking changes for users that wish to upgrade from an earlier version. - -## Upgrading to v3 (June 2019) - -cron v3 is a major upgrade to the library that addresses all outstanding bugs, -feature requests, and rough edges. It is based on a merge of master which -contains various fixes to issues found over the years and the v2 branch which -contains some backwards-incompatible features like the ability to remove cron -jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like -the timezone support, and fixes a number of bugs. - -New features: - -- Support for Go modules. 
Callers must now import this library as - `github.com/robfig/cron/v3`, instead of `gopkg.in/...` - -- Fixed bugs: - - 0f01e6b parser: fix combining of Dow and Dom (#70) - - dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157) - - eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144) - - 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97) - - 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206) - -- Standard cron spec parsing by default (first field is "minute"), with an easy - way to opt into the seconds field (quartz-compatible). Although, note that the - year field (optional in Quartz) is not supported. - -- Extensible, key/value logging via an interface that complies with - the https://github.com/go-logr/logr project. - -- The new Chain & JobWrapper types allow you to install "interceptors" to add - cross-cutting behavior like the following: - - Recover any panics from jobs - - Delay a job's execution if the previous run hasn't completed yet - - Skip a job's execution if the previous run hasn't completed yet - - Log each job's invocations - - Notification when jobs are completed - -It is backwards incompatible with both v1 and v2. These updates are required: - -- The v1 branch accepted an optional seconds field at the beginning of the cron - spec. This is non-standard and has led to a lot of confusion. The new default - parser conforms to the standard as described by [the Cron wikipedia page]. 
- - UPDATING: To retain the old behavior, construct your Cron with a custom - parser: - - // Seconds field, required - cron.New(cron.WithSeconds()) - - // Seconds field, optional - cron.New( - cron.WithParser( - cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)) - -- The Cron type now accepts functional options on construction rather than the - previous ad-hoc behavior modification mechanisms (setting a field, calling a setter). - - UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be - updated to provide those values on construction. - -- CRON_TZ is now the recommended way to specify the timezone of a single - schedule, which is sanctioned by the specification. The legacy "TZ=" prefix - will continue to be supported since it is unambiguous and easy to do so. - - UPDATING: No update is required. - -- By default, cron will no longer recover panics in jobs that it runs. - Recovering can be surprising (see issue #192) and seems to be at odds with - typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option - has been removed to accommodate the more general JobWrapper type. - - UPDATING: To opt into panic recovery and configure the panic logger: - - cron.New(cron.WithChain( - cron.Recover(logger), // or use cron.DefaultLogger - )) - -- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was - removed, since it is duplicative with the leveled logging. - - UPDATING: Callers should use `WithLogger` and specify a logger that does not - discard `Info` logs. For convenience, one is provided that wraps `*log.Logger`: - - cron.New( - cron.WithLogger(cron.VerbosePrintfLogger(logger))) - - -### Background - Cron spec format - -There are two cron spec formats in common usage: - -- The "standard" cron format, described on [the Cron wikipedia page] and used by - the cron Linux system utility. 
- -- The cron format used by [the Quartz Scheduler], commonly used for scheduled - jobs in Java software - -[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron -[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html - -The original version of this package included an optional "seconds" field, which -made it incompatible with both of these formats. Now, the "standard" format is -the default format accepted, and the Quartz format is opt-in. diff --git a/vendor/github.com/robfig/cron/v3/chain.go b/vendor/github.com/robfig/cron/v3/chain.go deleted file mode 100644 index 9565b418..00000000 --- a/vendor/github.com/robfig/cron/v3/chain.go +++ /dev/null @@ -1,92 +0,0 @@ -package cron - -import ( - "fmt" - "runtime" - "sync" - "time" -) - -// JobWrapper decorates the given Job with some behavior. -type JobWrapper func(Job) Job - -// Chain is a sequence of JobWrappers that decorates submitted jobs with -// cross-cutting behaviors like logging or synchronization. -type Chain struct { - wrappers []JobWrapper -} - -// NewChain returns a Chain consisting of the given JobWrappers. -func NewChain(c ...JobWrapper) Chain { - return Chain{c} -} - -// Then decorates the given job with all JobWrappers in the chain. -// -// This: -// NewChain(m1, m2, m3).Then(job) -// is equivalent to: -// m1(m2(m3(job))) -func (c Chain) Then(j Job) Job { - for i := range c.wrappers { - j = c.wrappers[len(c.wrappers)-i-1](j) - } - return j -} - -// Recover panics in wrapped jobs and log them with the provided logger. 
-func Recover(logger Logger) JobWrapper { - return func(j Job) Job { - return FuncJob(func() { - defer func() { - if r := recover(); r != nil { - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - err, ok := r.(error) - if !ok { - err = fmt.Errorf("%v", r) - } - logger.Error(err, "panic", "stack", "...\n"+string(buf)) - } - }() - j.Run() - }) - } -} - -// DelayIfStillRunning serializes jobs, delaying subsequent runs until the -// previous one is complete. Jobs running after a delay of more than a minute -// have the delay logged at Info. -func DelayIfStillRunning(logger Logger) JobWrapper { - return func(j Job) Job { - var mu sync.Mutex - return FuncJob(func() { - start := time.Now() - mu.Lock() - defer mu.Unlock() - if dur := time.Since(start); dur > time.Minute { - logger.Info("delay", "duration", dur) - } - j.Run() - }) - } -} - -// SkipIfStillRunning skips an invocation of the Job if a previous invocation is -// still running. It logs skips to the given logger at Info level. -func SkipIfStillRunning(logger Logger) JobWrapper { - return func(j Job) Job { - var ch = make(chan struct{}, 1) - ch <- struct{}{} - return FuncJob(func() { - select { - case v := <-ch: - j.Run() - ch <- v - default: - logger.Info("skip") - } - }) - } -} diff --git a/vendor/github.com/robfig/cron/v3/constantdelay.go b/vendor/github.com/robfig/cron/v3/constantdelay.go deleted file mode 100644 index cd6e7b1b..00000000 --- a/vendor/github.com/robfig/cron/v3/constantdelay.go +++ /dev/null @@ -1,27 +0,0 @@ -package cron - -import "time" - -// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes". -// It does not support jobs more frequent than once a second. -type ConstantDelaySchedule struct { - Delay time.Duration -} - -// Every returns a crontab Schedule that activates once every duration. -// Delays of less than a second are not supported (will round up to 1 second). -// Any fields less than a Second are truncated. 
-func Every(duration time.Duration) ConstantDelaySchedule { - if duration < time.Second { - duration = time.Second - } - return ConstantDelaySchedule{ - Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, - } -} - -// Next returns the next time this should be run. -// This rounds so that the next activation time will be on the second. -func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time { - return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond) -} diff --git a/vendor/github.com/robfig/cron/v3/cron.go b/vendor/github.com/robfig/cron/v3/cron.go deleted file mode 100644 index c7e91766..00000000 --- a/vendor/github.com/robfig/cron/v3/cron.go +++ /dev/null @@ -1,355 +0,0 @@ -package cron - -import ( - "context" - "sort" - "sync" - "time" -) - -// Cron keeps track of any number of entries, invoking the associated func as -// specified by the schedule. It may be started, stopped, and the entries may -// be inspected while running. -type Cron struct { - entries []*Entry - chain Chain - stop chan struct{} - add chan *Entry - remove chan EntryID - snapshot chan chan []Entry - running bool - logger Logger - runningMu sync.Mutex - location *time.Location - parser ScheduleParser - nextID EntryID - jobWaiter sync.WaitGroup -} - -// ScheduleParser is an interface for schedule spec parsers that return a Schedule -type ScheduleParser interface { - Parse(spec string) (Schedule, error) -} - -// Job is an interface for submitted cron jobs. -type Job interface { - Run() -} - -// Schedule describes a job's duty cycle. -type Schedule interface { - // Next returns the next activation time, later than the given time. - // Next is invoked initially, and then each time the job is run. - Next(time.Time) time.Time -} - -// EntryID identifies an entry within a Cron instance -type EntryID int - -// Entry consists of a schedule and the func to execute on that schedule. 
-type Entry struct { - // ID is the cron-assigned ID of this entry, which may be used to look up a - // snapshot or remove it. - ID EntryID - - // Schedule on which this job should be run. - Schedule Schedule - - // Next time the job will run, or the zero time if Cron has not been - // started or this entry's schedule is unsatisfiable - Next time.Time - - // Prev is the last time this job was run, or the zero time if never. - Prev time.Time - - // WrappedJob is the thing to run when the Schedule is activated. - WrappedJob Job - - // Job is the thing that was submitted to cron. - // It is kept around so that user code that needs to get at the job later, - // e.g. via Entries() can do so. - Job Job -} - -// Valid returns true if this is not the zero entry. -func (e Entry) Valid() bool { return e.ID != 0 } - -// byTime is a wrapper for sorting the entry array by time -// (with zero time at the end). -type byTime []*Entry - -func (s byTime) Len() int { return len(s) } -func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byTime) Less(i, j int) bool { - // Two zero times should return false. - // Otherwise, zero is "greater" than any other time. - // (To sort it at the end of the list.) - if s[i].Next.IsZero() { - return false - } - if s[j].Next.IsZero() { - return true - } - return s[i].Next.Before(s[j].Next) -} - -// New returns a new Cron job runner, modified by the given options. -// -// Available Settings -// -// Time Zone -// Description: The time zone in which schedules are interpreted -// Default: time.Local -// -// Parser -// Description: Parser converts cron spec strings into cron.Schedules. -// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron -// -// Chain -// Description: Wrap submitted jobs to customize behavior. -// Default: A chain that recovers panics and logs them to stderr. -// -// See "cron.With*" to modify the default behavior. 
-func New(opts ...Option) *Cron { - c := &Cron{ - entries: nil, - chain: NewChain(), - add: make(chan *Entry), - stop: make(chan struct{}), - snapshot: make(chan chan []Entry), - remove: make(chan EntryID), - running: false, - runningMu: sync.Mutex{}, - logger: DefaultLogger, - location: time.Local, - parser: standardParser, - } - for _, opt := range opts { - opt(c) - } - return c -} - -// FuncJob is a wrapper that turns a func() into a cron.Job -type FuncJob func() - -func (f FuncJob) Run() { f() } - -// AddFunc adds a func to the Cron to be run on the given schedule. -// The spec is parsed using the time zone of this Cron instance as the default. -// An opaque ID is returned that can be used to later remove it. -func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) { - return c.AddJob(spec, FuncJob(cmd)) -} - -// AddJob adds a Job to the Cron to be run on the given schedule. -// The spec is parsed using the time zone of this Cron instance as the default. -// An opaque ID is returned that can be used to later remove it. -func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) { - schedule, err := c.parser.Parse(spec) - if err != nil { - return 0, err - } - return c.Schedule(schedule, cmd), nil -} - -// Schedule adds a Job to the Cron to be run on the given schedule. -// The job is wrapped with the configured Chain. -func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID { - c.runningMu.Lock() - defer c.runningMu.Unlock() - c.nextID++ - entry := &Entry{ - ID: c.nextID, - Schedule: schedule, - WrappedJob: c.chain.Then(cmd), - Job: cmd, - } - if !c.running { - c.entries = append(c.entries, entry) - } else { - c.add <- entry - } - return entry.ID -} - -// Entries returns a snapshot of the cron entries. 
-func (c *Cron) Entries() []Entry { - c.runningMu.Lock() - defer c.runningMu.Unlock() - if c.running { - replyChan := make(chan []Entry, 1) - c.snapshot <- replyChan - return <-replyChan - } - return c.entrySnapshot() -} - -// Location gets the time zone location -func (c *Cron) Location() *time.Location { - return c.location -} - -// Entry returns a snapshot of the given entry, or nil if it couldn't be found. -func (c *Cron) Entry(id EntryID) Entry { - for _, entry := range c.Entries() { - if id == entry.ID { - return entry - } - } - return Entry{} -} - -// Remove an entry from being run in the future. -func (c *Cron) Remove(id EntryID) { - c.runningMu.Lock() - defer c.runningMu.Unlock() - if c.running { - c.remove <- id - } else { - c.removeEntry(id) - } -} - -// Start the cron scheduler in its own goroutine, or no-op if already started. -func (c *Cron) Start() { - c.runningMu.Lock() - defer c.runningMu.Unlock() - if c.running { - return - } - c.running = true - go c.run() -} - -// Run the cron scheduler, or no-op if already running. -func (c *Cron) Run() { - c.runningMu.Lock() - if c.running { - c.runningMu.Unlock() - return - } - c.running = true - c.runningMu.Unlock() - c.run() -} - -// run the scheduler.. this is private just due to the need to synchronize -// access to the 'running' state variable. -func (c *Cron) run() { - c.logger.Info("start") - - // Figure out the next activation times for each entry. - now := c.now() - for _, entry := range c.entries { - entry.Next = entry.Schedule.Next(now) - c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next) - } - - for { - // Determine the next entry to run. - sort.Sort(byTime(c.entries)) - - var timer *time.Timer - if len(c.entries) == 0 || c.entries[0].Next.IsZero() { - // If there are no entries yet, just sleep - it still handles new entries - // and stop requests. 
- timer = time.NewTimer(100000 * time.Hour) - } else { - timer = time.NewTimer(c.entries[0].Next.Sub(now)) - } - - for { - select { - case now = <-timer.C: - now = now.In(c.location) - c.logger.Info("wake", "now", now) - - // Run every entry whose next time was less than now - for _, e := range c.entries { - if e.Next.After(now) || e.Next.IsZero() { - break - } - c.startJob(e.WrappedJob) - e.Prev = e.Next - e.Next = e.Schedule.Next(now) - c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next) - } - - case newEntry := <-c.add: - timer.Stop() - now = c.now() - newEntry.Next = newEntry.Schedule.Next(now) - c.entries = append(c.entries, newEntry) - c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next) - - case replyChan := <-c.snapshot: - replyChan <- c.entrySnapshot() - continue - - case <-c.stop: - timer.Stop() - c.logger.Info("stop") - return - - case id := <-c.remove: - timer.Stop() - now = c.now() - c.removeEntry(id) - c.logger.Info("removed", "entry", id) - } - - break - } - } -} - -// startJob runs the given job in a new goroutine. -func (c *Cron) startJob(j Job) { - c.jobWaiter.Add(1) - go func() { - defer c.jobWaiter.Done() - j.Run() - }() -} - -// now returns current time in c location -func (c *Cron) now() time.Time { - return time.Now().In(c.location) -} - -// Stop stops the cron scheduler if it is running; otherwise it does nothing. -// A context is returned so the caller can wait for running jobs to complete. -func (c *Cron) Stop() context.Context { - c.runningMu.Lock() - defer c.runningMu.Unlock() - if c.running { - c.stop <- struct{}{} - c.running = false - } - ctx, cancel := context.WithCancel(context.Background()) - go func() { - c.jobWaiter.Wait() - cancel() - }() - return ctx -} - -// entrySnapshot returns a copy of the current cron entry list. 
-func (c *Cron) entrySnapshot() []Entry { - var entries = make([]Entry, len(c.entries)) - for i, e := range c.entries { - entries[i] = *e - } - return entries -} - -func (c *Cron) removeEntry(id EntryID) { - var entries []*Entry - for _, e := range c.entries { - if e.ID != id { - entries = append(entries, e) - } - } - c.entries = entries -} diff --git a/vendor/github.com/robfig/cron/v3/doc.go b/vendor/github.com/robfig/cron/v3/doc.go deleted file mode 100644 index fa5d08b4..00000000 --- a/vendor/github.com/robfig/cron/v3/doc.go +++ /dev/null @@ -1,231 +0,0 @@ -/* -Package cron implements a cron spec parser and job runner. - -Installation - -To download the specific tagged release, run: - - go get github.com/robfig/cron/v3@v3.0.0 - -Import it in your program as: - - import "github.com/robfig/cron/v3" - -It requires Go 1.11 or later due to usage of Go Modules. - -Usage - -Callers may register Funcs to be invoked on a given schedule. Cron will run -them in their own goroutines. - - c := cron.New() - c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") }) - c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") }) - c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") }) - c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") }) - c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") }) - c.Start() - .. - // Funcs are invoked in their own goroutine, asynchronously. - ... - // Funcs may also be added to a running Cron - c.AddFunc("@daily", func() { fmt.Println("Every day") }) - .. - // Inspect the cron job entries' next and previous run times. - inspect(c.Entries()) - .. - c.Stop() // Stop the scheduler (does not stop any jobs already running). - -CRON Expression Format - -A cron expression represents a set of times, using 5 space-separated fields. - - Field name | Mandatory? 
| Allowed values | Allowed special characters - ---------- | ---------- | -------------- | -------------------------- - Minutes | Yes | 0-59 | * / , - - Hours | Yes | 0-23 | * / , - - Day of month | Yes | 1-31 | * / , - ? - Month | Yes | 1-12 or JAN-DEC | * / , - - Day of week | Yes | 0-6 or SUN-SAT | * / , - ? - -Month and Day-of-week field values are case insensitive. "SUN", "Sun", and -"sun" are equally accepted. - -The specific interpretation of the format is based on the Cron Wikipedia page: -https://en.wikipedia.org/wiki/Cron - -Alternative Formats - -Alternative Cron expression formats support other fields like seconds. You can -implement that by creating a custom Parser as follows. - - cron.New( - cron.WithParser( - cron.NewParser( - cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))) - -Since adding Seconds is the most common modification to the standard cron spec, -cron provides a builtin function to do that, which is equivalent to the custom -parser you saw earlier, except that its seconds field is REQUIRED: - - cron.New(cron.WithSeconds()) - -That emulates Quartz, the most popular alternative Cron schedule format: -http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html - -Special Characters - -Asterisk ( * ) - -The asterisk indicates that the cron expression will match for all values of the -field; e.g., using an asterisk in the 5th field (month) would indicate every -month. - -Slash ( / ) - -Slashes are used to describe increments of ranges. For example 3-59/15 in the -1st field (minutes) would indicate the 3rd minute of the hour and every 15 -minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...", -that is, an increment over the largest possible range of the field. The form -"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the -increment until the end of that specific range. It does not wrap around. 
- -Comma ( , ) - -Commas are used to separate items of a list. For example, using "MON,WED,FRI" in -the 5th field (day of week) would mean Mondays, Wednesdays and Fridays. - -Hyphen ( - ) - -Hyphens are used to define ranges. For example, 9-17 would indicate every -hour between 9am and 5pm inclusive. - -Question mark ( ? ) - -Question mark may be used instead of '*' for leaving either day-of-month or -day-of-week blank. - -Predefined schedules - -You may use one of several pre-defined schedules in place of a cron expression. - - Entry | Description | Equivalent To - ----- | ----------- | ------------- - @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 * - @monthly | Run once a month, midnight, first of month | 0 0 1 * * - @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0 - @daily (or @midnight) | Run once a day, midnight | 0 0 * * * - @hourly | Run once an hour, beginning of hour | 0 * * * * - -Intervals - -You may also schedule a job to execute at fixed intervals, starting at the time it's added -or cron is run. This is supported by formatting the cron spec like this: - - @every - -where "duration" is a string accepted by time.ParseDuration -(http://golang.org/pkg/time/#ParseDuration). - -For example, "@every 1h30m10s" would indicate a schedule that activates after -1 hour, 30 minutes, 10 seconds, and then every interval after that. - -Note: The interval does not take the job runtime into account. For example, -if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes, -it will have only 2 minutes of idle time between each run. - -Time zones - -By default, all interpretation and scheduling is done in the machine's local -time zone (time.Local). 
You can specify a different time zone on construction: - - cron.New( - cron.WithLocation(time.UTC)) - -Individual cron schedules may also override the time zone they are to be -interpreted in by providing an additional space-separated field at the beginning -of the cron spec, of the form "CRON_TZ=Asia/Tokyo". - -For example: - - # Runs at 6am in time.Local - cron.New().AddFunc("0 6 * * ?", ...) - - # Runs at 6am in America/New_York - nyc, _ := time.LoadLocation("America/New_York") - c := cron.New(cron.WithLocation(nyc)) - c.AddFunc("0 6 * * ?", ...) - - # Runs at 6am in Asia/Tokyo - cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) - - # Runs at 6am in Asia/Tokyo - c := cron.New(cron.WithLocation(nyc)) - c.SetLocation("America/New_York") - c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) - -The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility. - -Be aware that jobs scheduled during daylight-savings leap-ahead transitions will -not be run! - -Job Wrappers - -A Cron runner may be configured with a chain of job wrappers to add -cross-cutting functionality to all submitted jobs. For example, they may be used -to achieve the following effects: - - - Recover any panics from jobs (activated by default) - - Delay a job's execution if the previous run hasn't completed yet - - Skip a job's execution if the previous run hasn't completed yet - - Log each job's invocations - -Install wrappers for all jobs added to a cron using the `cron.WithChain` option: - - cron.New(cron.WithChain( - cron.SkipIfStillRunning(logger), - )) - -Install wrappers for individual jobs by explicitly wrapping them: - - job = cron.NewChain( - cron.SkipIfStillRunning(logger), - ).Then(job) - -Thread safety - -Since the Cron service runs concurrently with the calling code, some amount of -care must be taken to ensure proper synchronization. 
- -All cron methods are designed to be correctly synchronized as long as the caller -ensures that invocations have a clear happens-before ordering between them. - -Logging - -Cron defines a Logger interface that is a subset of the one defined in -github.com/go-logr/logr. It has two logging levels (Info and Error), and -parameters are key/value pairs. This makes it possible for cron logging to plug -into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided -to wrap the standard library *log.Logger. - -For additional insight into Cron operations, verbose logging may be activated -which will record job runs, scheduling decisions, and added or removed jobs. -Activate it with a one-off logger as follows: - - cron.New( - cron.WithLogger( - cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)))) - - -Implementation - -Cron entries are stored in an array, sorted by their next activation time. Cron -sleeps until the next job is due to be run. - -Upon waking: - - it runs each entry that is active on that second - - it calculates the next run times for the jobs that were run - - it re-sorts the array of entries by next activation time. - - it goes to sleep until the soonest job. -*/ -package cron diff --git a/vendor/github.com/robfig/cron/v3/logger.go b/vendor/github.com/robfig/cron/v3/logger.go deleted file mode 100644 index b4efcc05..00000000 --- a/vendor/github.com/robfig/cron/v3/logger.go +++ /dev/null @@ -1,86 +0,0 @@ -package cron - -import ( - "io/ioutil" - "log" - "os" - "strings" - "time" -) - -// DefaultLogger is used by Cron if none is specified. -var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)) - -// DiscardLogger can be used by callers to discard all log messages. -var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0)) - -// Logger is the interface used in this package for logging, so that any backend -// can be plugged in. 
It is a subset of the github.com/go-logr/logr interface. -type Logger interface { - // Info logs routine messages about cron's operation. - Info(msg string, keysAndValues ...interface{}) - // Error logs an error condition. - Error(err error, msg string, keysAndValues ...interface{}) -} - -// PrintfLogger wraps a Printf-based logger (such as the standard library "log") -// into an implementation of the Logger interface which logs errors only. -func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { - return printfLogger{l, false} -} - -// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library -// "log") into an implementation of the Logger interface which logs everything. -func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { - return printfLogger{l, true} -} - -type printfLogger struct { - logger interface{ Printf(string, ...interface{}) } - logInfo bool -} - -func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) { - if pl.logInfo { - keysAndValues = formatTimes(keysAndValues) - pl.logger.Printf( - formatString(len(keysAndValues)), - append([]interface{}{msg}, keysAndValues...)...) - } -} - -func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) { - keysAndValues = formatTimes(keysAndValues) - pl.logger.Printf( - formatString(len(keysAndValues)+2), - append([]interface{}{msg, "error", err}, keysAndValues...)...) -} - -// formatString returns a logfmt-like format string for the number of -// key/values. -func formatString(numKeysAndValues int) string { - var sb strings.Builder - sb.WriteString("%s") - if numKeysAndValues > 0 { - sb.WriteString(", ") - } - for i := 0; i < numKeysAndValues/2; i++ { - if i > 0 { - sb.WriteString(", ") - } - sb.WriteString("%v=%v") - } - return sb.String() -} - -// formatTimes formats any time.Time values as RFC3339. 
-func formatTimes(keysAndValues []interface{}) []interface{} { - var formattedArgs []interface{} - for _, arg := range keysAndValues { - if t, ok := arg.(time.Time); ok { - arg = t.Format(time.RFC3339) - } - formattedArgs = append(formattedArgs, arg) - } - return formattedArgs -} diff --git a/vendor/github.com/robfig/cron/v3/option.go b/vendor/github.com/robfig/cron/v3/option.go deleted file mode 100644 index 09e4278e..00000000 --- a/vendor/github.com/robfig/cron/v3/option.go +++ /dev/null @@ -1,45 +0,0 @@ -package cron - -import ( - "time" -) - -// Option represents a modification to the default behavior of a Cron. -type Option func(*Cron) - -// WithLocation overrides the timezone of the cron instance. -func WithLocation(loc *time.Location) Option { - return func(c *Cron) { - c.location = loc - } -} - -// WithSeconds overrides the parser used for interpreting job schedules to -// include a seconds field as the first one. -func WithSeconds() Option { - return WithParser(NewParser( - Second | Minute | Hour | Dom | Month | Dow | Descriptor, - )) -} - -// WithParser overrides the parser used for interpreting job schedules. -func WithParser(p ScheduleParser) Option { - return func(c *Cron) { - c.parser = p - } -} - -// WithChain specifies Job wrappers to apply to all jobs added to this cron. -// Refer to the Chain* functions in this package for provided wrappers. -func WithChain(wrappers ...JobWrapper) Option { - return func(c *Cron) { - c.chain = NewChain(wrappers...) - } -} - -// WithLogger uses the provided logger. -func WithLogger(logger Logger) Option { - return func(c *Cron) { - c.logger = logger - } -} diff --git a/vendor/github.com/robfig/cron/v3/parser.go b/vendor/github.com/robfig/cron/v3/parser.go deleted file mode 100644 index 3cf8879f..00000000 --- a/vendor/github.com/robfig/cron/v3/parser.go +++ /dev/null @@ -1,434 +0,0 @@ -package cron - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// Configuration options for creating a parser. 
Most options specify which -// fields should be included, while others enable features. If a field is not -// included the parser will assume a default value. These options do not change -// the order fields are parse in. -type ParseOption int - -const ( - Second ParseOption = 1 << iota // Seconds field, default 0 - SecondOptional // Optional seconds field, default 0 - Minute // Minutes field, default 0 - Hour // Hours field, default 0 - Dom // Day of month field, default * - Month // Month field, default * - Dow // Day of week field, default * - DowOptional // Optional day of week field, default * - Descriptor // Allow descriptors such as @monthly, @weekly, etc. -) - -var places = []ParseOption{ - Second, - Minute, - Hour, - Dom, - Month, - Dow, -} - -var defaults = []string{ - "0", - "0", - "0", - "*", - "*", - "*", -} - -// A custom Parser that can be configured. -type Parser struct { - options ParseOption -} - -// NewParser creates a Parser with custom options. -// -// It panics if more than one Optional is given, since it would be impossible to -// correctly infer which optional is provided or missing in general. -// -// Examples -// -// // Standard parser without descriptors -// specParser := NewParser(Minute | Hour | Dom | Month | Dow) -// sched, err := specParser.Parse("0 0 15 */3 *") -// -// // Same as above, just excludes time fields -// subsParser := NewParser(Dom | Month | Dow) -// sched, err := specParser.Parse("15 */3 *") -// -// // Same as above, just makes Dow optional -// subsParser := NewParser(Dom | Month | DowOptional) -// sched, err := specParser.Parse("15 */3") -// -func NewParser(options ParseOption) Parser { - optionals := 0 - if options&DowOptional > 0 { - optionals++ - } - if options&SecondOptional > 0 { - optionals++ - } - if optionals > 1 { - panic("multiple optionals may not be configured") - } - return Parser{options} -} - -// Parse returns a new crontab schedule representing the given spec. 
-// It returns a descriptive error if the spec is not valid. -// It accepts crontab specs and features configured by NewParser. -func (p Parser) Parse(spec string) (Schedule, error) { - if len(spec) == 0 { - return nil, fmt.Errorf("empty spec string") - } - - // Extract timezone if present - var loc = time.Local - if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") { - var err error - i := strings.Index(spec, " ") - eq := strings.Index(spec, "=") - if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil { - return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err) - } - spec = strings.TrimSpace(spec[i:]) - } - - // Handle named schedules (descriptors), if configured - if strings.HasPrefix(spec, "@") { - if p.options&Descriptor == 0 { - return nil, fmt.Errorf("parser does not accept descriptors: %v", spec) - } - return parseDescriptor(spec, loc) - } - - // Split on whitespace. - fields := strings.Fields(spec) - - // Validate & fill in any omitted or optional fields - var err error - fields, err = normalizeFields(fields, p.options) - if err != nil { - return nil, err - } - - field := func(field string, r bounds) uint64 { - if err != nil { - return 0 - } - var bits uint64 - bits, err = getField(field, r) - return bits - } - - var ( - second = field(fields[0], seconds) - minute = field(fields[1], minutes) - hour = field(fields[2], hours) - dayofmonth = field(fields[3], dom) - month = field(fields[4], months) - dayofweek = field(fields[5], dow) - ) - if err != nil { - return nil, err - } - - return &SpecSchedule{ - Second: second, - Minute: minute, - Hour: hour, - Dom: dayofmonth, - Month: month, - Dow: dayofweek, - Location: loc, - }, nil -} - -// normalizeFields takes a subset set of the time fields and returns the full set -// with defaults (zeroes) populated for unset fields. -// -// As part of performing this function, it also validates that the provided -// fields are compatible with the configured options. 
-func normalizeFields(fields []string, options ParseOption) ([]string, error) { - // Validate optionals & add their field to options - optionals := 0 - if options&SecondOptional > 0 { - options |= Second - optionals++ - } - if options&DowOptional > 0 { - options |= Dow - optionals++ - } - if optionals > 1 { - return nil, fmt.Errorf("multiple optionals may not be configured") - } - - // Figure out how many fields we need - max := 0 - for _, place := range places { - if options&place > 0 { - max++ - } - } - min := max - optionals - - // Validate number of fields - if count := len(fields); count < min || count > max { - if min == max { - return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields) - } - return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields) - } - - // Populate the optional field if not provided - if min < max && len(fields) == min { - switch { - case options&DowOptional > 0: - fields = append(fields, defaults[5]) // TODO: improve access to default - case options&SecondOptional > 0: - fields = append([]string{defaults[0]}, fields...) - default: - return nil, fmt.Errorf("unknown optional field") - } - } - - // Populate all fields not part of options with their defaults - n := 0 - expandedFields := make([]string, len(places)) - copy(expandedFields, defaults) - for i, place := range places { - if options&place > 0 { - expandedFields[i] = fields[n] - n++ - } - } - return expandedFields, nil -} - -var standardParser = NewParser( - Minute | Hour | Dom | Month | Dow | Descriptor, -) - -// ParseStandard returns a new crontab schedule representing the given -// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries -// representing: minute, hour, day of month, month and day of week, in that -// order. It returns a descriptive error if the spec is not valid. -// -// It accepts -// - Standard crontab specs, e.g. "* * * * ?" -// - Descriptors, e.g. 
"@midnight", "@every 1h30m" -func ParseStandard(standardSpec string) (Schedule, error) { - return standardParser.Parse(standardSpec) -} - -// getField returns an Int with the bits set representing all of the times that -// the field represents or error parsing field value. A "field" is a comma-separated -// list of "ranges". -func getField(field string, r bounds) (uint64, error) { - var bits uint64 - ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) - for _, expr := range ranges { - bit, err := getRange(expr, r) - if err != nil { - return bits, err - } - bits |= bit - } - return bits, nil -} - -// getRange returns the bits indicated by the given expression: -// number | number "-" number [ "/" number ] -// or error parsing range. -func getRange(expr string, r bounds) (uint64, error) { - var ( - start, end, step uint - rangeAndStep = strings.Split(expr, "/") - lowAndHigh = strings.Split(rangeAndStep[0], "-") - singleDigit = len(lowAndHigh) == 1 - err error - ) - - var extra uint64 - if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { - start = r.min - end = r.max - extra = starBit - } else { - start, err = parseIntOrName(lowAndHigh[0], r.names) - if err != nil { - return 0, err - } - switch len(lowAndHigh) { - case 1: - end = start - case 2: - end, err = parseIntOrName(lowAndHigh[1], r.names) - if err != nil { - return 0, err - } - default: - return 0, fmt.Errorf("too many hyphens: %s", expr) - } - } - - switch len(rangeAndStep) { - case 1: - step = 1 - case 2: - step, err = mustParseInt(rangeAndStep[1]) - if err != nil { - return 0, err - } - - // Special handling: "N/step" means "N-max/step". 
- if singleDigit { - end = r.max - } - if step > 1 { - extra = 0 - } - default: - return 0, fmt.Errorf("too many slashes: %s", expr) - } - - if start < r.min { - return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr) - } - if end > r.max { - return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr) - } - if start > end { - return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr) - } - if step == 0 { - return 0, fmt.Errorf("step of range should be a positive number: %s", expr) - } - - return getBits(start, end, step) | extra, nil -} - -// parseIntOrName returns the (possibly-named) integer contained in expr. -func parseIntOrName(expr string, names map[string]uint) (uint, error) { - if names != nil { - if namedInt, ok := names[strings.ToLower(expr)]; ok { - return namedInt, nil - } - } - return mustParseInt(expr) -} - -// mustParseInt parses the given expression as an int or returns an error. -func mustParseInt(expr string) (uint, error) { - num, err := strconv.Atoi(expr) - if err != nil { - return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err) - } - if num < 0 { - return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr) - } - - return uint(num), nil -} - -// getBits sets all bits in the range [min, max], modulo the given step size. -func getBits(min, max, step uint) uint64 { - var bits uint64 - - // If step is 1, use shifts. - if step == 1 { - return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) - } - - // Else, use a simple loop. - for i := min; i <= max; i += step { - bits |= 1 << i - } - return bits -} - -// all returns all bits within the given bounds. (plus the star bit) -func all(r bounds) uint64 { - return getBits(r.min, r.max, 1) | starBit -} - -// parseDescriptor returns a predefined schedule for the expression, or error if none matches. 
-func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) { - switch descriptor { - case "@yearly", "@annually": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: 1 << dom.min, - Month: 1 << months.min, - Dow: all(dow), - Location: loc, - }, nil - - case "@monthly": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: 1 << dom.min, - Month: all(months), - Dow: all(dow), - Location: loc, - }, nil - - case "@weekly": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: all(dom), - Month: all(months), - Dow: 1 << dow.min, - Location: loc, - }, nil - - case "@daily", "@midnight": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: all(dom), - Month: all(months), - Dow: all(dow), - Location: loc, - }, nil - - case "@hourly": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: all(hours), - Dom: all(dom), - Month: all(months), - Dow: all(dow), - Location: loc, - }, nil - - } - - const every = "@every " - if strings.HasPrefix(descriptor, every) { - duration, err := time.ParseDuration(descriptor[len(every):]) - if err != nil { - return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err) - } - return Every(duration), nil - } - - return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor) -} diff --git a/vendor/github.com/robfig/cron/v3/spec.go b/vendor/github.com/robfig/cron/v3/spec.go deleted file mode 100644 index fa1e241e..00000000 --- a/vendor/github.com/robfig/cron/v3/spec.go +++ /dev/null @@ -1,188 +0,0 @@ -package cron - -import "time" - -// SpecSchedule specifies a duty cycle (to the second granularity), based on a -// traditional crontab specification. It is computed initially and stored as bit sets. 
-type SpecSchedule struct { - Second, Minute, Hour, Dom, Month, Dow uint64 - - // Override location for this schedule. - Location *time.Location -} - -// bounds provides a range of acceptable values (plus a map of name to value). -type bounds struct { - min, max uint - names map[string]uint -} - -// The bounds for each field. -var ( - seconds = bounds{0, 59, nil} - minutes = bounds{0, 59, nil} - hours = bounds{0, 23, nil} - dom = bounds{1, 31, nil} - months = bounds{1, 12, map[string]uint{ - "jan": 1, - "feb": 2, - "mar": 3, - "apr": 4, - "may": 5, - "jun": 6, - "jul": 7, - "aug": 8, - "sep": 9, - "oct": 10, - "nov": 11, - "dec": 12, - }} - dow = bounds{0, 6, map[string]uint{ - "sun": 0, - "mon": 1, - "tue": 2, - "wed": 3, - "thu": 4, - "fri": 5, - "sat": 6, - }} -) - -const ( - // Set the top bit if a star was included in the expression. - starBit = 1 << 63 -) - -// Next returns the next time this schedule is activated, greater than the given -// time. If no time can be found to satisfy the schedule, return the zero time. -func (s *SpecSchedule) Next(t time.Time) time.Time { - // General approach - // - // For Month, Day, Hour, Minute, Second: - // Check if the time value matches. If yes, continue to the next field. - // If the field doesn't match the schedule, then increment the field until it matches. - // While incrementing the field, a wrap-around brings it back to the beginning - // of the field list (since it is necessary to re-verify previous field - // values) - - // Convert the given time into the schedule's timezone, if one is specified. - // Save the original timezone so we can convert back after we find a time. - // Note that schedules without a time zone specified (time.Local) are treated - // as local to the time provided. - origLocation := t.Location() - loc := s.Location - if loc == time.Local { - loc = t.Location() - } - if s.Location != time.Local { - t = t.In(s.Location) - } - - // Start at the earliest possible time (the upcoming second). 
- t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) - - // This flag indicates whether a field has been incremented. - added := false - - // If no time is found within five years, return zero. - yearLimit := t.Year() + 5 - -WRAP: - if t.Year() > yearLimit { - return time.Time{} - } - - // Find the first applicable month. - // If it's this month, then do nothing. - for 1< 12 { - t = t.Add(time.Duration(24-t.Hour()) * time.Hour) - } else { - t = t.Add(time.Duration(-t.Hour()) * time.Hour) - } - } - - if t.Day() == 1 { - goto WRAP - } - } - - for 1< 0 - dowMatch bool = 1< 0 - ) - if s.Dom&starBit > 0 || s.Dow&starBit > 0 { - return domMatch && dowMatch - } - return domMatch || dowMatch -} diff --git a/vendor/github.com/rogpeppe/fastuuid/LICENSE b/vendor/github.com/rogpeppe/fastuuid/LICENSE deleted file mode 100644 index 9525fc82..00000000 --- a/vendor/github.com/rogpeppe/fastuuid/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright © 2014, Roger Peppe -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of this project nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rogpeppe/fastuuid/README.md b/vendor/github.com/rogpeppe/fastuuid/README.md deleted file mode 100644 index 5a9e5f91..00000000 --- a/vendor/github.com/rogpeppe/fastuuid/README.md +++ /dev/null @@ -1,95 +0,0 @@ -# fastuuid --- - import "github.com/rogpeppe/fastuuid" - -Package fastuuid provides fast UUID generation of 192 bit universally unique -identifiers. - -It also provides simple support for 128-bit RFC-4122 V4 UUID strings. - -Note that the generated UUIDs are not unguessable - each UUID generated from a -Generator is adjacent to the previously generated UUID. - -By way of comparison with two other popular UUID-generation packages, -github.com/satori/go.uuid and github.com/google/uuid, here are some benchmarks: - - BenchmarkNext-4 128272185 9.20 ns/op - BenchmarkHex128-4 14323180 76.4 ns/op - BenchmarkContended-4 45741997 26.4 ns/op - BenchmarkSatoriNext-4 1231281 967 ns/op - BenchmarkSatoriHex128-4 1000000 1041 ns/op - BenchmarkSatoriContended-4 1765520 666 ns/op - BenchmarkGoogleNext-4 1256250 958 ns/op - BenchmarkGoogleHex128-4 1000000 1044 ns/op - BenchmarkGoogleContended-4 1746570 690 ns/op - -## Usage - -#### func Hex128 - -```go -func Hex128(uuid [24]byte) string -``` -Hex128 returns an RFC4122 V4 representation of the first 128 bits of the given -UUID. For example: - - f81d4fae-7dec-41d0-8765-00a0c91e6bf6. 
- -Note: before encoding, it swaps bytes 6 and 9 so that all the varying bits of -the UUID as returned from Generator.Next are reflected in the Hex128 -representation. - -If you want unpredictable UUIDs, you might want to consider hashing the uuid -(using SHA256, for example) before passing it to Hex128. - -#### func ValidHex128 - -```go -func ValidHex128(id string) bool -``` -ValidHex128 reports whether id is a valid UUID as returned by Hex128 and various -other UUID packages, such as github.com/satori/go.uuid's NewV4 function. - -Note that it does not allow upper case hex. - -#### type Generator - -```go -type Generator struct { -} -``` - -Generator represents a UUID generator that generates UUIDs in sequence from a -random starting point. - -#### func MustNewGenerator - -```go -func MustNewGenerator() *Generator -``` -MustNewGenerator is like NewGenerator but panics on failure. - -#### func NewGenerator - -```go -func NewGenerator() (*Generator, error) -``` -NewGenerator returns a new Generator. It can fail if the crypto/rand read fails. - -#### func (*Generator) Hex128 - -```go -func (g *Generator) Hex128() string -``` -Hex128 is a convenience method that returns Hex128(g.Next()). - -#### func (*Generator) Next - -```go -func (g *Generator) Next() [24]byte -``` -Next returns the next UUID from the generator. Only the first 8 bytes can differ -from the previous UUID, so taking a slice of the first 16 bytes is sufficient to -provide a somewhat less secure 128 bit UUID. - -It is OK to call this method concurrently. diff --git a/vendor/github.com/rogpeppe/fastuuid/uuid.go b/vendor/github.com/rogpeppe/fastuuid/uuid.go deleted file mode 100644 index 6cb38365..00000000 --- a/vendor/github.com/rogpeppe/fastuuid/uuid.go +++ /dev/null @@ -1,146 +0,0 @@ -// Package fastuuid provides fast UUID generation of 192 bit -// universally unique identifiers. -// -// It also provides simple support for 128-bit RFC-4122 V4 UUID strings. 
-// -// Note that the generated UUIDs are not unguessable - each -// UUID generated from a Generator is adjacent to the -// previously generated UUID. -// -// By way of comparison with two other popular UUID-generation packages, github.com/satori/go.uuid -// and github.com/google/uuid, here are some benchmarks: -// -// BenchmarkNext-4 128272185 9.20 ns/op -// BenchmarkHex128-4 14323180 76.4 ns/op -// BenchmarkContended-4 45741997 26.4 ns/op -// BenchmarkSatoriNext-4 1231281 967 ns/op -// BenchmarkSatoriHex128-4 1000000 1041 ns/op -// BenchmarkSatoriContended-4 1765520 666 ns/op -// BenchmarkGoogleNext-4 1256250 958 ns/op -// BenchmarkGoogleHex128-4 1000000 1044 ns/op -// BenchmarkGoogleContended-4 1746570 690 ns/op -package fastuuid - -import ( - "crypto/rand" - "encoding/binary" - "encoding/hex" - "errors" - "sync/atomic" -) - -// Generator represents a UUID generator that -// generates UUIDs in sequence from a random starting -// point. -type Generator struct { - // The constant seed. The first 8 bytes of this are - // copied into counter and then ignored thereafter. - seed [24]byte - counter uint64 -} - -// NewGenerator returns a new Generator. -// It can fail if the crypto/rand read fails. -func NewGenerator() (*Generator, error) { - var g Generator - _, err := rand.Read(g.seed[:]) - if err != nil { - return nil, errors.New("cannot generate random seed: " + err.Error()) - } - g.counter = binary.LittleEndian.Uint64(g.seed[:8]) - return &g, nil -} - -// MustNewGenerator is like NewGenerator -// but panics on failure. -func MustNewGenerator() *Generator { - g, err := NewGenerator() - if err != nil { - panic(err) - } - return g -} - -// Next returns the next UUID from the generator. -// Only the first 8 bytes can differ from the previous -// UUID, so taking a slice of the first 16 bytes -// is sufficient to provide a somewhat less secure 128 bit UUID. -// -// It is OK to call this method concurrently. 
-func (g *Generator) Next() [24]byte { - x := atomic.AddUint64(&g.counter, 1) - uuid := g.seed - binary.LittleEndian.PutUint64(uuid[:8], x) - return uuid -} - -// Hex128 is a convenience method that returns Hex128(g.Next()). -func (g *Generator) Hex128() string { - return Hex128(g.Next()) -} - -// Hex128 returns an RFC4122 V4 representation of the -// first 128 bits of the given UUID. For example: -// -// f81d4fae-7dec-41d0-8765-00a0c91e6bf6. -// -// Note: before encoding, it swaps bytes 6 and 9 -// so that all the varying bits of the UUID as -// returned from Generator.Next are reflected -// in the Hex128 representation. -// -// If you want unpredictable UUIDs, you might want to consider -// hashing the uuid (using SHA256, for example) before passing it -// to Hex128. -func Hex128(uuid [24]byte) string { - // As fastuuid only varies the first 8 bytes of the UUID and we - // don't want to lose any of that variance, swap the UUID - // version byte in that range for one outside it. - uuid[6], uuid[9] = uuid[9], uuid[6] - - // Version 4. - uuid[6] = (uuid[6] & 0x0f) | 0x40 - // RFC4122 variant. - uuid[8] = uuid[8]&0x3f | 0x80 - - b := make([]byte, 36) - hex.Encode(b[0:8], uuid[0:4]) - b[8] = '-' - hex.Encode(b[9:13], uuid[4:6]) - b[13] = '-' - hex.Encode(b[14:18], uuid[6:8]) - b[18] = '-' - hex.Encode(b[19:23], uuid[8:10]) - b[23] = '-' - hex.Encode(b[24:], uuid[10:16]) - return string(b) -} - -// ValidHex128 reports whether id is a valid UUID as returned by Hex128 -// and various other UUID packages, such as github.com/satori/go.uuid's -// NewV4 function. -// -// Note that it does not allow upper case hex. 
-func ValidHex128(id string) bool { - if len(id) != 36 { - return false - } - if id[8] != '-' || id[13] != '-' || id[18] != '-' || id[23] != '-' { - return false - } - return isValidHex(id[0:8]) && - isValidHex(id[9:13]) && - isValidHex(id[14:18]) && - isValidHex(id[19:23]) && - isValidHex(id[24:]) -} - -func isValidHex(s string) bool { - for i := 0; i < len(s); i++ { - c := s[i] - if !('0' <= c && c <= '9' || 'a' <= c && c <= 'f') { - return false - } - } - return true -} diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore deleted file mode 100644 index 1fb13abe..00000000 --- a/vendor/github.com/sirupsen/logrus/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -logrus -vendor - -.idea/ diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml deleted file mode 100644 index 65dc2850..00000000 --- a/vendor/github.com/sirupsen/logrus/.golangci.yml +++ /dev/null @@ -1,40 +0,0 @@ -run: - # do not run on test files yet - tests: false - -# all available settings of specific linters -linters-settings: - errcheck: - # report about not checking of errors in type assetions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. - check-type-assertions: false - - # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; - # default is false: such cases aren't reported by default. 
- check-blank: false - - lll: - line-length: 100 - tab-width: 4 - - prealloc: - simple: false - range-loops: false - for-loops: false - - whitespace: - multi-if: false # Enforces newlines (or comments) after every multi-line if statement - multi-func: false # Enforces newlines (or comments) after every multi-line function signature - -linters: - enable: - - megacheck - - govet - disable: - - maligned - - prealloc - disable-all: false - presets: - - bugs - - unused - fast: false diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml deleted file mode 100644 index c1dbd5a3..00000000 --- a/vendor/github.com/sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -go_import_path: github.com/sirupsen/logrus -git: - depth: 1 -env: - - GO111MODULE=on -go: 1.15.x -os: linux -install: - - ./travis/install.sh -script: - - cd ci - - go run mage.go -v -w ../ crossBuild - - go run mage.go -v -w ../ lint - - go run mage.go -v -w ../ test diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index 7567f612..00000000 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,259 +0,0 @@ -# 1.8.1 -Code quality: - * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer - * improve timestamp format documentation - -Fixes: - * fix race condition on logger hooks - - -# 1.8.0 - -Correct versioning number replacing v1.7.1. - -# 1.7.1 - -Beware this release has introduced a new public API and its semver is therefore incorrect. 
- -Code quality: - * use go 1.15 in travis - * use magefile as task runner - -Fixes: - * small fixes about new go 1.13 error formatting system - * Fix for long time race condiction with mutating data hooks - -Features: - * build support for zos - -# 1.7.0 -Fixes: - * the dependency toward a windows terminal library has been removed - -Features: - * a new buffer pool management API has been added - * a set of `Fn()` functions have been added - -# 1.6.0 -Fixes: - * end of line cleanup - * revert the entry concurrency bug fix whic leads to deadlock under some circumstances - * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 - -Features: - * add an option to the `TextFormatter` to completely disable fields quoting - -# 1.5.0 -Code quality: - * add golangci linter run on travis - -Fixes: - * add mutex for hooks concurrent access on `Entry` data - * caller function field for go1.14 - * fix build issue for gopherjs target - -Feature: - * add an hooks/writer sub-package whose goal is to split output on different stream depending on the trace level - * add a `DisableHTMLEscape` option in the `JSONFormatter` - * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` - -# 1.4.2 - * Fixes build break for plan9, nacl, solaris -# 1.4.1 -This new release introduces: - * Enhance TextFormatter to not print caller information when they are empty (#944) - * Remove dependency on golang.org/x/crypto (#932, #943) - -Fixes: - * Fix Entry.WithContext method to return a copy of the initial entry (#941) - -# 1.4.0 -This new release introduces: - * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). - * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) - * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). 
- -Fixes: - * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). - * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) - * Fix infinite recursion on unknown `Level.String()` (#907) - * Fix race condition in `getCaller` (#916). - - -# 1.3.0 -This new release introduces: - * Log, Logf, Logln functions for Logger and Entry that take a Level - -Fixes: - * Building prometheus node_exporter on AIX (#840) - * Race condition in TextFormatter (#468) - * Travis CI import path (#868) - * Remove coloured output on Windows (#862) - * Pointer to func as field in JSONFormatter (#870) - * Properly marshal Levels (#873) - -# 1.2.0 -This new release introduces: - * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued - * A new trace level named `Trace` whose level is below `Debug` - * A configurable exit function to be called upon a Fatal trace - * The `Level` object now implements `encoding.TextUnmarshaler` interface - -# 1.1.1 -This is a bug fix release. 
- * fix the build break on Solaris - * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized - -# 1.1.0 -This new release introduces: - * several fixes: - * a fix for a race condition on entry formatting - * proper cleanup of previously used entries before putting them back in the pool - * the extra new line at the end of message in text formatter has been removed - * a new global public API to check if a level is activated: IsLevelEnabled - * the following methods have been added to the Logger object - * IsLevelEnabled - * SetFormatter - * SetOutput - * ReplaceHooks - * introduction of go module - * an indent configuration for the json formatter - * output colour support for windows - * the field sort function is now configurable for text formatter - * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater - -# 1.0.6 - -This new release introduces: - * a new api WithTime which allows to easily force the time of the log entry - which is mostly useful for logger wrapper - * a fix reverting the immutability of the entry given as parameter to the hooks - a new configuration field of the json formatter in order to put all the fields - in a nested dictionnary - * a new SetOutput method in the Logger - * a new configuration of the textformatter to configure the name of the default keys - * a new configuration of the text formatter to disable the level truncation - -# 1.0.5 - -* Fix hooks race (#707) -* Fix panic deadlock (#695) - -# 1.0.4 - -* Fix race when adding hooks (#612) -* Fix terminal check in AppEngine (#635) - -# 1.0.3 - -* Replace example files with testable examples - -# 1.0.2 - -* bug: quote non-string values in text formatter (#583) -* Make (*Logger) SetLevel a public method - -# 1.0.1 - -* bug: fix escaping in text formatter (#575) - -# 1.0.0 - -* Officially changed name to lower-case -* bug: colors on Windows 10 (#541) -* bug: fix race in accessing level (#512) - -# 0.11.5 - 
-* feature: add writer and writerlevel to entry (#372) - -# 0.11.4 - -* bug: fix undefined variable on solaris (#493) - -# 0.11.3 - -* formatter: configure quoting of empty values (#484) -* formatter: configure quoting character (default is `"`) (#484) -* bug: fix not importing io correctly in non-linux environments (#481) - -# 0.11.2 - -* bug: fix windows terminal detection (#476) - -# 0.11.1 - -* bug: fix tty detection with custom out (#471) - -# 0.11.0 - -* performance: Use bufferpool to allocate (#370) -* terminal: terminal detection for app-engine (#343) -* feature: exit handler (#375) - -# 0.10.0 - -* feature: Add a test hook (#180) -* feature: `ParseLevel` is now case-insensitive (#326) -* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) -* performance: avoid re-allocations on `WithFields` (#335) - -# 0.9.0 - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository -* logrus/core: run tests with `-race` -* logrus/core: detect TTY based on `stderr` -* logrus/core: support `WithError` on logger -* logrus/core: Solaris support - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* 
hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42..00000000 --- a/vendor/github.com/sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md deleted file mode 100644 index b042c896..00000000 --- a/vendor/github.com/sirupsen/logrus/README.md +++ /dev/null @@ -1,513 +0,0 @@ -# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. - -**Logrus is in maintenance-mode.** We will not be introducing new features. It's -simply too hard to do in a way that won't break many people's projects, which is -the last thing you want from your Logging library (again...). - -This does not mean Logrus is dead. Logrus will continue to be maintained for -security, (backwards compatible) bug fixes, and performance (where we are -limited by the interface). - -I believe Logrus' biggest contribution is to have played a part in today's -widespread use of structured logging in Golang. There doesn't seem to be a -reason to do a major, breaking iteration into Logrus V2, since the fantastic Go -community has built those independently. Many fantastic alternatives have sprung -up. Logrus would look like those, had it been re-designed with what we know -about structured logging in Go today. Check out, for example, -[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. - -[zerolog]: https://github.com/rs/zerolog -[zap]: https://github.com/uber-go/zap -[apex]: https://github.com/apex/log - -**Seeing weird case-sensitive problems?** It's in the past been possible to -import Logrus as both upper- and lower-case. 
Due to the Go package environment, -this caused issues in the community and we needed a standard. Some environments -experienced problems with the upper-case variant, so the lower-case was decided. -Everything using `logrus` will need to use the lower-case: -`github.com/sirupsen/logrus`. Any package that isn't, should be changed. - -To fix Glide, see [these -comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). -For an in-depth explanation of the casing issue, see [this -comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number 
increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -``` -To ensure this behaviour even if a TTY is attached, set your formatter as follows: - -```go - log.SetFormatter(&log.TextFormatter{ - DisableColors: true, - FullTimestamp: true, - }) -``` - -#### Logging Method Name - -If you wish to add the calling method as a field, instruct the logger via: -```go -log.SetReportCaller(true) -``` -This adds the caller as 'method' like so: - -```json -{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", -"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} -``` - -```text -time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin -``` -Note that this does add measurable overhead - the cost will depend on the version of Go, but is -between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your -environment via benchmarks: -``` -go test -bench=.*CallerTracing -``` - - -#### Case-sensitivity - -The organization's name was changed to lower-case--and this will not be changed -back. If you are getting import conflicts due to case sensitivity, please use -the lower-case import: `github.com/sirupsen/logrus`. 
- -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stdout instead of the default stderr - // Can be any io.Writer, see below for File example - log.SetOutput(os.Stdout) - - // Only log the warning severity or above. - log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "os" - "github.com/sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. 
-var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stdout - - // You could set this to any `io.Writer` such as a file - // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) - // if err == nil { - // log.Out = file - // } else { - // log.Info("Failed to log to file, using default stderr") - // } - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Default Fields - -Often it's helpful to have fields _always_ attached to log statements in an -application or parts of one. For example, you may want to always log the -`request_id` and `user_ip` in the context of a request. 
Instead of writing -`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on -every line, you can create a `logrus.Entry` to pass around instead: - -```go -requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) -requestLogger.Info("something happened on that request") # will log request_id and user_ip -requestLogger.Warn("something not great happened") -``` - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" - logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). - -A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) - - -#### Level logging - -Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. 
- -```go -log.Trace("Something very low level.") -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/sirupsen/logrus" -) - -func init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. 
- -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true`. For Windows, see - [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). - * When colors are enabled, levels are truncated to 4 characters by default. To disable - truncation set the `DisableLevelTruncation` field to `true`. - * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). -* `logrus.JSONFormatter`. Logs fields as JSON. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). - -Third party logging formatters: - -* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. -* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). -* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. -* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. -* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. -* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). 
get fileName, log's line number and the latest function's name when print log; Sava log to files. -* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -This means that we can override the standard library logger easily: - -```go -logger := logrus.New() -logger.Formatter = &logrus.JSONFormatter{} - -// Use logrus for standard log output -// Note that `log` here references stdlib's log -// Not logrus imported under the name `log`. -log.SetOutput(logger.Writer()) -``` - -#### Rotation - -Log rotation is not provided with Logrus. 
Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - -#### Tools - -| Tool | Description | -| ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.| -|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | - -#### Testing - -Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: - -* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook -* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): - -```go -import( - "github.com/sirupsen/logrus" - "github.com/sirupsen/logrus/hooks/test" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestSomething(t*testing.T){ - logger, hook := test.NewNullLogger() - logger.Error("Helloerror") - - assert.Equal(t, 1, len(hook.Entries)) - assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) - assert.Equal(t, "Helloerror", hook.LastEntry().Message) - - hook.Reset() - assert.Nil(t, hook.LastEntry()) -} -``` - -#### Fatal handlers - -Logrus can register one or more functions that will be called when any `fatal` -level message is logged. The registered handlers will be executed before -logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need -to gracefully shutdown. 
Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. - -``` -... -handler := func() { - // gracefully shutdown something... -} -logrus.RegisterExitHandler(handler) -... -``` - -#### Thread safety - -By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. -If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. - -Situation when locking is not needed includes: - -* You have no hooks registered, or hooks calling is already thread-safe. - -* Writing to logger.Out is already thread-safe, for example: - - 1) logger.Out is protected by locks. - - 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) - - (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go deleted file mode 100644 index 8fd189e1..00000000 --- a/vendor/github.com/sirupsen/logrus/alt_exit.go +++ /dev/null @@ -1,76 +0,0 @@ -package logrus - -// The following code was sourced and modified from the -// https://github.com/tebeka/atexit package governed by the following license: -// -// Copyright (c) 2012 Miki Tebeka . 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software is furnished to do so, -// subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -import ( - "fmt" - "os" -) - -var handlers = []func(){} - -func runHandler(handler func()) { - defer func() { - if err := recover(); err != nil { - fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) - } - }() - - handler() -} - -func runHandlers() { - for _, handler := range handlers { - runHandler(handler) - } -} - -// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) -func Exit(code int) { - runHandlers() - os.Exit(code) -} - -// RegisterExitHandler appends a Logrus Exit handler to the list of handlers, -// call logrus.Exit to invoke all handlers. The handlers will also be invoked when -// any Fatal log entry is made. -// -// This method is useful when a caller wishes to use logrus to log a fatal -// message but also needs to gracefully shutdown. 
An example usecase could be -// closing database connections, or sending a alert that the application is -// closing. -func RegisterExitHandler(handler func()) { - handlers = append(handlers, handler) -} - -// DeferExitHandler prepends a Logrus Exit handler to the list of handlers, -// call logrus.Exit to invoke all handlers. The handlers will also be invoked when -// any Fatal log entry is made. -// -// This method is useful when a caller wishes to use logrus to log a fatal -// message but also needs to gracefully shutdown. An example usecase could be -// closing database connections, or sending a alert that the application is -// closing. -func DeferExitHandler(handler func()) { - handlers = append([]func(){handler}, handlers...) -} diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml deleted file mode 100644 index df9d65c3..00000000 --- a/vendor/github.com/sirupsen/logrus/appveyor.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: "{build}" -platform: x64 -clone_folder: c:\gopath\src\github.com\sirupsen\logrus -environment: - GOPATH: c:\gopath -branches: - only: - - master -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version -build_script: - - go get -t - - go test diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go deleted file mode 100644 index c7787f77..00000000 --- a/vendor/github.com/sirupsen/logrus/buffer_pool.go +++ /dev/null @@ -1,43 +0,0 @@ -package logrus - -import ( - "bytes" - "sync" -) - -var ( - bufferPool BufferPool -) - -type BufferPool interface { - Put(*bytes.Buffer) - Get() *bytes.Buffer -} - -type defaultPool struct { - pool *sync.Pool -} - -func (p *defaultPool) Put(buf *bytes.Buffer) { - p.pool.Put(buf) -} - -func (p *defaultPool) Get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -// SetBufferPool allows to replace the default logrus buffer pool -// to better meets the specific needs of an application. 
-func SetBufferPool(bp BufferPool) { - bufferPool = bp -} - -func init() { - SetBufferPool(&defaultPool{ - pool: &sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, - }, - }) -} diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go deleted file mode 100644 index da67aba0..00000000 --- a/vendor/github.com/sirupsen/logrus/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Package logrus is a structured logger for Go, completely API compatible with the standard library logger. - - -The simplest way to use Logrus is simply the package-level exported logger: - - package main - - import ( - log "github.com/sirupsen/logrus" - ) - - func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "number": 1, - "size": 10, - }).Info("A walrus appears") - } - -Output: - time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 - -For a full guide visit https://github.com/sirupsen/logrus -*/ -package logrus diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go deleted file mode 100644 index 71cdbbc3..00000000 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ /dev/null @@ -1,442 +0,0 @@ -package logrus - -import ( - "bytes" - "context" - "fmt" - "os" - "reflect" - "runtime" - "strings" - "sync" - "time" -) - -var ( - - // qualified package name, cached at first use - logrusPackage string - - // Positions in the call stack when tracing to report the calling method - minimumCallerDepth int - - // Used for caller information initialisation - callerInitOnce sync.Once -) - -const ( - maximumCallerDepth int = 25 - knownLogrusFrames int = 4 -) - -func init() { - // start at the bottom of the stack before the package-name cache is primed - minimumCallerDepth = 1 -} - -// Defines the key when adding errors using WithError. -var ErrorKey = "error" - -// An entry is the final or intermediate Logrus logging entry. 
It contains all -// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, -// Info, Warn, Error, Fatal or Panic is called on it. These objects can be -// reused and passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic - // This field will be set on entry firing and the value will be equal to the one in Logger struct field. - Level Level - - // Calling method, with package name - Caller *runtime.Frame - - // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic - Message string - - // When formatter is called in entry.log(), a Buffer may be set to entry - Buffer *bytes.Buffer - - // Contains the context set by the user. Useful for hook processing etc. - Context context.Context - - // err may contain a field formatting error - err string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, plus one optional. Give a little extra room. - Data: make(Fields, 6), - } -} - -func (entry *Entry) Dup() *Entry { - data := make(Fields, len(entry.Data)) - for k, v := range entry.Data { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} -} - -// Returns the bytes representation of this entry from the formatter. -func (entry *Entry) Bytes() ([]byte, error) { - return entry.Logger.Formatter.Format(entry) -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - serialized, err := entry.Bytes() - if err != nil { - return "", err - } - str := string(serialized) - return str, nil -} - -// Add an error as single field (using the key defined in ErrorKey) to the Entry. 
-func (entry *Entry) WithError(err error) *Entry { - return entry.WithField(ErrorKey, err) -} - -// Add a context to the Entry. -func (entry *Entry) WithContext(ctx context.Context) *Entry { - dataCopy := make(Fields, len(entry.Data)) - for k, v := range entry.Data { - dataCopy[k] = v - } - return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := make(Fields, len(entry.Data)+len(fields)) - for k, v := range entry.Data { - data[k] = v - } - fieldErr := entry.err - for k, v := range fields { - isErrField := false - if t := reflect.TypeOf(v); t != nil { - switch { - case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: - isErrField = true - } - } - if isErrField { - tmp := fmt.Sprintf("can not add field %q", k) - if fieldErr != "" { - fieldErr = entry.err + ", " + tmp - } else { - fieldErr = tmp - } - } else { - data[k] = v - } - } - return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} -} - -// Overrides the time of the Entry. -func (entry *Entry) WithTime(t time.Time) *Entry { - dataCopy := make(Fields, len(entry.Data)) - for k, v := range entry.Data { - dataCopy[k] = v - } - return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} -} - -// getPackageName reduces a fully qualified function name to the package name -// There really ought to be to be a better way... 
-func getPackageName(f string) string { - for { - lastPeriod := strings.LastIndex(f, ".") - lastSlash := strings.LastIndex(f, "/") - if lastPeriod > lastSlash { - f = f[:lastPeriod] - } else { - break - } - } - - return f -} - -// getCaller retrieves the name of the first non-logrus calling function -func getCaller() *runtime.Frame { - // cache this package's fully-qualified name - callerInitOnce.Do(func() { - pcs := make([]uintptr, maximumCallerDepth) - _ = runtime.Callers(0, pcs) - - // dynamic get the package name and the minimum caller depth - for i := 0; i < maximumCallerDepth; i++ { - funcName := runtime.FuncForPC(pcs[i]).Name() - if strings.Contains(funcName, "getCaller") { - logrusPackage = getPackageName(funcName) - break - } - } - - minimumCallerDepth = knownLogrusFrames - }) - - // Restrict the lookback frames to avoid runaway lookups - pcs := make([]uintptr, maximumCallerDepth) - depth := runtime.Callers(minimumCallerDepth, pcs) - frames := runtime.CallersFrames(pcs[:depth]) - - for f, again := frames.Next(); again; f, again = frames.Next() { - pkg := getPackageName(f.Function) - - // If the caller isn't part of this package, we're done - if pkg != logrusPackage { - return &f //nolint:scopelint - } - } - - // if we got here, we failed to find the caller's context - return nil -} - -func (entry Entry) HasCaller() (has bool) { - return entry.Logger != nil && - entry.Logger.ReportCaller && - entry.Caller != nil -} - -func (entry *Entry) log(level Level, msg string) { - var buffer *bytes.Buffer - - newEntry := entry.Dup() - - if newEntry.Time.IsZero() { - newEntry.Time = time.Now() - } - - newEntry.Level = level - newEntry.Message = msg - - newEntry.Logger.mu.Lock() - reportCaller := newEntry.Logger.ReportCaller - bufPool := newEntry.getBufferPool() - newEntry.Logger.mu.Unlock() - - if reportCaller { - newEntry.Caller = getCaller() - } - - newEntry.fireHooks() - buffer = bufPool.Get() - defer func() { - newEntry.Buffer = nil - buffer.Reset() - 
bufPool.Put(buffer) - }() - buffer.Reset() - newEntry.Buffer = buffer - - newEntry.write() - - newEntry.Buffer = nil - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(newEntry) - } -} - -func (entry *Entry) getBufferPool() (pool BufferPool) { - if entry.Logger.BufferPool != nil { - return entry.Logger.BufferPool - } - return bufferPool -} - -func (entry *Entry) fireHooks() { - var tmpHooks LevelHooks - entry.Logger.mu.Lock() - tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) - for k, v := range entry.Logger.Hooks { - tmpHooks[k] = v - } - entry.Logger.mu.Unlock() - - err := tmpHooks.Fire(entry.Level, entry) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - } -} - -func (entry *Entry) write() { - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - serialized, err := entry.Logger.Formatter.Format(entry) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - return - } - if _, err := entry.Logger.Out.Write(serialized); err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } -} - -// Log will log a message at the level given as parameter. -// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. -// For this behaviour Entry.Panic or Entry.Fatal should be used instead. -func (entry *Entry) Log(level Level, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.log(level, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Trace(args ...interface{}) { - entry.Log(TraceLevel, args...) -} - -func (entry *Entry) Debug(args ...interface{}) { - entry.Log(DebugLevel, args...) -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - entry.Log(InfoLevel, args...) 
-} - -func (entry *Entry) Warn(args ...interface{}) { - entry.Log(WarnLevel, args...) -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) -} - -func (entry *Entry) Error(args ...interface{}) { - entry.Log(ErrorLevel, args...) -} - -func (entry *Entry) Fatal(args ...interface{}) { - entry.Log(FatalLevel, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - entry.Log(PanicLevel, args...) -} - -// Entry Printf family functions - -func (entry *Entry) Logf(level Level, format string, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.Log(level, fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Tracef(format string, args ...interface{}) { - entry.Logf(TraceLevel, format, args...) -} - -func (entry *Entry) Debugf(format string, args ...interface{}) { - entry.Logf(DebugLevel, format, args...) -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - entry.Logf(InfoLevel, format, args...) -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - entry.Logf(WarnLevel, format, args...) -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - entry.Logf(ErrorLevel, format, args...) -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - entry.Logf(FatalLevel, format, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - entry.Logf(PanicLevel, format, args...) -} - -// Entry Println family functions - -func (entry *Entry) Logln(level Level, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.Log(level, entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Traceln(args ...interface{}) { - entry.Logln(TraceLevel, args...) 
-} - -func (entry *Entry) Debugln(args ...interface{}) { - entry.Logln(DebugLevel, args...) -} - -func (entry *Entry) Infoln(args ...interface{}) { - entry.Logln(InfoLevel, args...) -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - entry.Logln(WarnLevel, args...) -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - entry.Logln(ErrorLevel, args...) -} - -func (entry *Entry) Fatalln(args ...interface{}) { - entry.Logln(FatalLevel, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - entry.Logln(PanicLevel, args...) -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) - return msg[:len(msg)-1] -} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go deleted file mode 100644 index 017c30ce..00000000 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ /dev/null @@ -1,270 +0,0 @@ -package logrus - -import ( - "context" - "io" - "time" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.SetOutput(out) -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.SetFormatter(formatter) -} - -// SetReportCaller sets whether the standard logger will include the calling -// method as a field. 
-func SetReportCaller(include bool) { - std.SetReportCaller(include) -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.SetLevel(level) -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - return std.GetLevel() -} - -// IsLevelEnabled checks if the log level of the standard logger is greater than the level param -func IsLevelEnabled(level Level) bool { - return std.IsLevelEnabled(level) -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.AddHook(hook) -} - -// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. -func WithError(err error) *Entry { - return std.WithField(ErrorKey, err) -} - -// WithContext creates an entry from the standard logger and adds a context to it. -func WithContext(ctx context.Context) *Entry { - return std.WithContext(ctx) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// WithTime creates an entry from the standard logger and overrides the time of -// logs generated with it. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. 
-func WithTime(t time.Time) *Entry { - return std.WithTime(t) -} - -// Trace logs a message at level Trace on the standard logger. -func Trace(args ...interface{}) { - std.Trace(args...) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// TraceFn logs a message from a func at level Trace on the standard logger. -func TraceFn(fn LogFunction) { - std.TraceFn(fn) -} - -// DebugFn logs a message from a func at level Debug on the standard logger. -func DebugFn(fn LogFunction) { - std.DebugFn(fn) -} - -// PrintFn logs a message from a func at level Info on the standard logger. -func PrintFn(fn LogFunction) { - std.PrintFn(fn) -} - -// InfoFn logs a message from a func at level Info on the standard logger. -func InfoFn(fn LogFunction) { - std.InfoFn(fn) -} - -// WarnFn logs a message from a func at level Warn on the standard logger. -func WarnFn(fn LogFunction) { - std.WarnFn(fn) -} - -// WarningFn logs a message from a func at level Warn on the standard logger. 
-func WarningFn(fn LogFunction) { - std.WarningFn(fn) -} - -// ErrorFn logs a message from a func at level Error on the standard logger. -func ErrorFn(fn LogFunction) { - std.ErrorFn(fn) -} - -// PanicFn logs a message from a func at level Panic on the standard logger. -func PanicFn(fn LogFunction) { - std.PanicFn(fn) -} - -// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. -func FatalFn(fn LogFunction) { - std.FatalFn(fn) -} - -// Tracef logs a message at level Trace on the standard logger. -func Tracef(format string, args ...interface{}) { - std.Tracef(format, args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Traceln logs a message at level Trace on the standard logger. 
-func Traceln(args ...interface{}) { - std.Traceln(args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go deleted file mode 100644 index 40888377..00000000 --- a/vendor/github.com/sirupsen/logrus/formatter.go +++ /dev/null @@ -1,78 +0,0 @@ -package logrus - -import "time" - -// Default key names for the default fields -const ( - defaultTimestampFormat = time.RFC3339 - FieldKeyMsg = "msg" - FieldKeyLevel = "level" - FieldKeyTime = "time" - FieldKeyLogrusError = "logrus_error" - FieldKeyFunc = "func" - FieldKeyFile = "file" -) - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. 
The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { - timeKey := fieldMap.resolve(FieldKeyTime) - if t, ok := data[timeKey]; ok { - data["fields."+timeKey] = t - delete(data, timeKey) - } - - msgKey := fieldMap.resolve(FieldKeyMsg) - if m, ok := data[msgKey]; ok { - data["fields."+msgKey] = m - delete(data, msgKey) - } - - levelKey := fieldMap.resolve(FieldKeyLevel) - if l, ok := data[levelKey]; ok { - data["fields."+levelKey] = l - delete(data, levelKey) - } - - logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) - if l, ok := data[logrusErrKey]; ok { - data["fields."+logrusErrKey] = l - delete(data, logrusErrKey) - } - - // If reportCaller is not set, 'func' will not conflict. 
- if reportCaller { - funcKey := fieldMap.resolve(FieldKeyFunc) - if l, ok := data[funcKey]; ok { - data["fields."+funcKey] = l - } - fileKey := fieldMap.resolve(FieldKeyFile) - if l, ok := data[fileKey]; ok { - data["fields."+fileKey] = l - } - } -} diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go deleted file mode 100644 index 3f151cdc..00000000 --- a/vendor/github.com/sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type LevelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks LevelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. 
-func (hooks LevelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/sirupsen/logrus/hooks/syslog/README.md b/vendor/github.com/sirupsen/logrus/hooks/syslog/README.md deleted file mode 100644 index 1bbc0f72..00000000 --- a/vendor/github.com/sirupsen/logrus/hooks/syslog/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Syslog Hooks for Logrus :walrus: - -## Usage - -```go -import ( - "log/syslog" - "github.com/sirupsen/logrus" - lSyslog "github.com/sirupsen/logrus/hooks/syslog" -) - -func main() { - log := logrus.New() - hook, err := lSyslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - - if err == nil { - log.Hooks.Add(hook) - } -} -``` - -If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). Just assign empty string to the first two parameters of `NewSyslogHook`. It should look like the following. - -```go -import ( - "log/syslog" - "github.com/sirupsen/logrus" - lSyslog "github.com/sirupsen/logrus/hooks/syslog" -) - -func main() { - log := logrus.New() - hook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "") - - if err == nil { - log.Hooks.Add(hook) - } -} -``` diff --git a/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go deleted file mode 100644 index 02b8df38..00000000 --- a/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build !windows,!nacl,!plan9 - -package syslog - -import ( - "fmt" - "log/syslog" - "os" - - "github.com/sirupsen/logrus" -) - -// SyslogHook to send logs via syslog. -type SyslogHook struct { - Writer *syslog.Writer - SyslogNetwork string - SyslogRaddr string -} - -// Creates a hook to be added to an instance of logger. 
This is called with -// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` -// `if err == nil { log.Hooks.Add(hook) }` -func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { - w, err := syslog.Dial(network, raddr, priority, tag) - return &SyslogHook{w, network, raddr}, err -} - -func (hook *SyslogHook) Fire(entry *logrus.Entry) error { - line, err := entry.String() - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) - return err - } - - switch entry.Level { - case logrus.PanicLevel: - return hook.Writer.Crit(line) - case logrus.FatalLevel: - return hook.Writer.Crit(line) - case logrus.ErrorLevel: - return hook.Writer.Err(line) - case logrus.WarnLevel: - return hook.Writer.Warning(line) - case logrus.InfoLevel: - return hook.Writer.Info(line) - case logrus.DebugLevel, logrus.TraceLevel: - return hook.Writer.Debug(line) - default: - return nil - } -} - -func (hook *SyslogHook) Levels() []logrus.Level { - return logrus.AllLevels -} diff --git a/vendor/github.com/sirupsen/logrus/hooks/writer/README.md b/vendor/github.com/sirupsen/logrus/hooks/writer/README.md deleted file mode 100644 index 69676309..00000000 --- a/vendor/github.com/sirupsen/logrus/hooks/writer/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# Writer Hooks for Logrus - -Send logs of given levels to any object with `io.Writer` interface. 
- -## Usage - -If you want for example send high level logs to `Stderr` and -logs of normal execution to `Stdout`, you could do it like this: - -```go -package main - -import ( - "io/ioutil" - "os" - - log "github.com/sirupsen/logrus" - "github.com/sirupsen/logrus/hooks/writer" -) - -func main() { - log.SetOutput(ioutil.Discard) // Send all logs to nowhere by default - - log.AddHook(&writer.Hook{ // Send logs with level higher than warning to stderr - Writer: os.Stderr, - LogLevels: []log.Level{ - log.PanicLevel, - log.FatalLevel, - log.ErrorLevel, - log.WarnLevel, - }, - }) - log.AddHook(&writer.Hook{ // Send info and debug logs to stdout - Writer: os.Stdout, - LogLevels: []log.Level{ - log.InfoLevel, - log.DebugLevel, - }, - }) - log.Info("This will go to stdout") - log.Warn("This will go to stderr") -} -``` diff --git a/vendor/github.com/sirupsen/logrus/hooks/writer/writer.go b/vendor/github.com/sirupsen/logrus/hooks/writer/writer.go deleted file mode 100644 index 1160c790..00000000 --- a/vendor/github.com/sirupsen/logrus/hooks/writer/writer.go +++ /dev/null @@ -1,29 +0,0 @@ -package writer - -import ( - "io" - - log "github.com/sirupsen/logrus" -) - -// Hook is a hook that writes logs of specified LogLevels to specified Writer -type Hook struct { - Writer io.Writer - LogLevels []log.Level -} - -// Fire will be called when some logging function is called with current hook -// It will format log entry to string and write it to appropriate writer -func (hook *Hook) Fire(entry *log.Entry) error { - line, err := entry.Bytes() - if err != nil { - return err - } - _, err = hook.Writer.Write(line) - return err -} - -// Levels define on which log levels this hook would trigger -func (hook *Hook) Levels() []log.Level { - return hook.LogLevels -} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go deleted file mode 100644 index c96dc563..00000000 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go 
+++ /dev/null @@ -1,128 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "fmt" - "runtime" -) - -type fieldKey string - -// FieldMap allows customization of the key names for default fields. -type FieldMap map[fieldKey]string - -func (f FieldMap) resolve(key fieldKey) string { - if k, ok := f[key]; ok { - return k - } - - return string(key) -} - -// JSONFormatter formats logs into parsable json -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. - // The format to use is the same than for time.Format or time.Parse from the standard - // library. - // The standard Library already provides a set of predefined format. - TimestampFormat string - - // DisableTimestamp allows disabling automatic timestamps in output - DisableTimestamp bool - - // DisableHTMLEscape allows disabling html escaping in output - DisableHTMLEscape bool - - // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. - DataKey string - - // FieldMap allows users to customize the names of keys for default fields. - // As an example: - // formatter := &JSONFormatter{ - // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", - // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message", - // FieldKeyFunc: "@caller", - // }, - // } - FieldMap FieldMap - - // CallerPrettyfier can be set by the user to modify the content - // of the function and file keys in the json data when ReportCaller is - // activated. If any of the returned value is the empty string the - // corresponding key will be removed from json fields. 
- CallerPrettyfier func(*runtime.Frame) (function string, file string) - - // PrettyPrint will indent all json logs - PrettyPrint bool -} - -// Format renders a single log entry -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+4) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - - if f.DataKey != "" { - newData := make(Fields, 4) - newData[f.DataKey] = data - data = newData - } - - prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = defaultTimestampFormat - } - - if entry.err != "" { - data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err - } - if !f.DisableTimestamp { - data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) - } - data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message - data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() - if entry.HasCaller() { - funcVal := entry.Caller.Function - fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } - if funcVal != "" { - data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal - } - if fileVal != "" { - data[f.FieldMap.resolve(FieldKeyFile)] = fileVal - } - } - - var b *bytes.Buffer - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - encoder := json.NewEncoder(b) - encoder.SetEscapeHTML(!f.DisableHTMLEscape) - if f.PrettyPrint { - encoder.SetIndent("", " ") - } - if err := encoder.Encode(data); err != nil { - return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) - } - - return b.Bytes(), nil -} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go deleted file 
mode 100644 index 5ff0aef6..00000000 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ /dev/null @@ -1,417 +0,0 @@ -package logrus - -import ( - "context" - "io" - "os" - "sync" - "sync/atomic" - "time" -) - -// LogFunction For big messages, it can be more efficient to pass a function -// and only call it if the log level is actually enables rather than -// generating the log message and then checking if the level is enabled -type LogFunction func() []interface{} - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stderr`. You can also set this to - // something more adventurous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks LevelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - - // Flag for whether to log caller info (off by default) - ReportCaller bool - - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. - Level Level - // Used to sync writing to the log. Locking is enabled by Default - mu MutexWrap - // Reusable empty entry - entryPool sync.Pool - // Function to exit the application, defaults to `os.Exit()` - ExitFunc exitFunc - // The buffer pool used to format the log. If it is nil, the default global - // buffer pool will be used. 
- BufferPool BufferPool -} - -type exitFunc func(int) - -type MutexWrap struct { - lock sync.Mutex - disabled bool -} - -func (mw *MutexWrap) Lock() { - if !mw.disabled { - mw.lock.Lock() - } -} - -func (mw *MutexWrap) Unlock() { - if !mw.disabled { - mw.lock.Unlock() - } -} - -func (mw *MutexWrap) Disable() { - mw.disabled = true -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &logrus.Logger{ -// Out: os.Stderr, -// Formatter: new(logrus.TextFormatter), -// Hooks: make(logrus.LevelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. -func New() *Logger { - return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, - ExitFunc: os.Exit, - ReportCaller: false, - } -} - -func (logger *Logger) newEntry() *Entry { - entry, ok := logger.entryPool.Get().(*Entry) - if ok { - return entry - } - return NewEntry(logger) -} - -func (logger *Logger) releaseEntry(entry *Entry) { - entry.Data = map[string]interface{}{} - logger.entryPool.Put(entry) -} - -// WithField allocates a new entry and adds a field to it. -// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to -// this new returned entry. -// If you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithFields(fields) -} - -// Add an error as single field to the log entry. All it does is call -// `WithError` for the given `error`. 
-func (logger *Logger) WithError(err error) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithError(err) -} - -// Add a context to the log entry. -func (logger *Logger) WithContext(ctx context.Context) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithContext(ctx) -} - -// Overrides the time of the log entry. -func (logger *Logger) WithTime(t time.Time) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithTime(t) -} - -func (logger *Logger) Logf(level Level, format string, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Logf(level, format, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Tracef(format string, args ...interface{}) { - logger.Logf(TraceLevel, format, args...) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - logger.Logf(DebugLevel, format, args...) -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - logger.Logf(InfoLevel, format, args...) -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - entry := logger.newEntry() - entry.Printf(format, args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - logger.Logf(WarnLevel, format, args...) -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - logger.Warnf(format, args...) -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - logger.Logf(ErrorLevel, format, args...) -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - logger.Logf(FatalLevel, format, args...) - logger.Exit(1) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - logger.Logf(PanicLevel, format, args...) -} - -// Log will log a message at the level given as parameter. 
-// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. -// For this behaviour Logger.Panic or Logger.Fatal should be used instead. -func (logger *Logger) Log(level Level, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Log(level, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) LogFn(level Level, fn LogFunction) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Log(level, fn()...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Trace(args ...interface{}) { - logger.Log(TraceLevel, args...) -} - -func (logger *Logger) Debug(args ...interface{}) { - logger.Log(DebugLevel, args...) -} - -func (logger *Logger) Info(args ...interface{}) { - logger.Log(InfoLevel, args...) -} - -func (logger *Logger) Print(args ...interface{}) { - entry := logger.newEntry() - entry.Print(args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warn(args ...interface{}) { - logger.Log(WarnLevel, args...) -} - -func (logger *Logger) Warning(args ...interface{}) { - logger.Warn(args...) -} - -func (logger *Logger) Error(args ...interface{}) { - logger.Log(ErrorLevel, args...) -} - -func (logger *Logger) Fatal(args ...interface{}) { - logger.Log(FatalLevel, args...) - logger.Exit(1) -} - -func (logger *Logger) Panic(args ...interface{}) { - logger.Log(PanicLevel, args...) -} - -func (logger *Logger) TraceFn(fn LogFunction) { - logger.LogFn(TraceLevel, fn) -} - -func (logger *Logger) DebugFn(fn LogFunction) { - logger.LogFn(DebugLevel, fn) -} - -func (logger *Logger) InfoFn(fn LogFunction) { - logger.LogFn(InfoLevel, fn) -} - -func (logger *Logger) PrintFn(fn LogFunction) { - entry := logger.newEntry() - entry.Print(fn()...) 
- logger.releaseEntry(entry) -} - -func (logger *Logger) WarnFn(fn LogFunction) { - logger.LogFn(WarnLevel, fn) -} - -func (logger *Logger) WarningFn(fn LogFunction) { - logger.WarnFn(fn) -} - -func (logger *Logger) ErrorFn(fn LogFunction) { - logger.LogFn(ErrorLevel, fn) -} - -func (logger *Logger) FatalFn(fn LogFunction) { - logger.LogFn(FatalLevel, fn) - logger.Exit(1) -} - -func (logger *Logger) PanicFn(fn LogFunction) { - logger.LogFn(PanicLevel, fn) -} - -func (logger *Logger) Logln(level Level, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Logln(level, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Traceln(args ...interface{}) { - logger.Logln(TraceLevel, args...) -} - -func (logger *Logger) Debugln(args ...interface{}) { - logger.Logln(DebugLevel, args...) -} - -func (logger *Logger) Infoln(args ...interface{}) { - logger.Logln(InfoLevel, args...) -} - -func (logger *Logger) Println(args ...interface{}) { - entry := logger.newEntry() - entry.Println(args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warnln(args ...interface{}) { - logger.Logln(WarnLevel, args...) -} - -func (logger *Logger) Warningln(args ...interface{}) { - logger.Warnln(args...) -} - -func (logger *Logger) Errorln(args ...interface{}) { - logger.Logln(ErrorLevel, args...) -} - -func (logger *Logger) Fatalln(args ...interface{}) { - logger.Logln(FatalLevel, args...) - logger.Exit(1) -} - -func (logger *Logger) Panicln(args ...interface{}) { - logger.Logln(PanicLevel, args...) -} - -func (logger *Logger) Exit(code int) { - runHandlers() - if logger.ExitFunc == nil { - logger.ExitFunc = os.Exit - } - logger.ExitFunc(code) -} - -//When file is opened with appending mode, it's safe to -//write concurrently to a file (within 4k message on Linux). -//In these cases user can choose to disable the lock. 
-func (logger *Logger) SetNoLock() { - logger.mu.Disable() -} - -func (logger *Logger) level() Level { - return Level(atomic.LoadUint32((*uint32)(&logger.Level))) -} - -// SetLevel sets the logger level. -func (logger *Logger) SetLevel(level Level) { - atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) -} - -// GetLevel returns the logger level. -func (logger *Logger) GetLevel() Level { - return logger.level() -} - -// AddHook adds a hook to the logger hooks. -func (logger *Logger) AddHook(hook Hook) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Hooks.Add(hook) -} - -// IsLevelEnabled checks if the log level of the logger is greater than the level param -func (logger *Logger) IsLevelEnabled(level Level) bool { - return logger.level() >= level -} - -// SetFormatter sets the logger formatter. -func (logger *Logger) SetFormatter(formatter Formatter) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Formatter = formatter -} - -// SetOutput sets the logger output. -func (logger *Logger) SetOutput(output io.Writer) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Out = output -} - -func (logger *Logger) SetReportCaller(reportCaller bool) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.ReportCaller = reportCaller -} - -// ReplaceHooks replaces the logger hooks and returns the old ones -func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { - logger.mu.Lock() - oldHooks := logger.Hooks - logger.Hooks = hooks - logger.mu.Unlock() - return oldHooks -} - -// SetBufferPool sets the logger buffer pool. 
-func (logger *Logger) SetBufferPool(pool BufferPool) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.BufferPool = pool -} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go deleted file mode 100644 index 2f16224c..00000000 --- a/vendor/github.com/sirupsen/logrus/logrus.go +++ /dev/null @@ -1,186 +0,0 @@ -package logrus - -import ( - "fmt" - "log" - "strings" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint32 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - if b, err := level.MarshalText(); err == nil { - return string(b) - } else { - return "unknown" - } -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch strings.ToLower(lvl) { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - case "trace": - return TraceLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// UnmarshalText implements encoding.TextUnmarshaler. 
-func (level *Level) UnmarshalText(text []byte) error { - l, err := ParseLevel(string(text)) - if err != nil { - return err - } - - *level = l - - return nil -} - -func (level Level) MarshalText() ([]byte, error) { - switch level { - case TraceLevel: - return []byte("trace"), nil - case DebugLevel: - return []byte("debug"), nil - case InfoLevel: - return []byte("info"), nil - case WarnLevel: - return []byte("warning"), nil - case ErrorLevel: - return []byte("error"), nil - case FatalLevel: - return []byte("fatal"), nil - case PanicLevel: - return []byte("panic"), nil - } - - return nil, fmt.Errorf("not a valid logrus level %d", level) -} - -// A constant exposing all logging levels -var AllLevels = []Level{ - PanicLevel, - FatalLevel, - ErrorLevel, - WarnLevel, - InfoLevel, - DebugLevel, - TraceLevel, -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel - // TraceLevel level. Designates finer-grained informational events than the Debug. 
- TraceLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var ( - _ StdLogger = &log.Logger{} - _ StdLogger = &Entry{} - _ StdLogger = &Logger{} -) - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. -type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} - -// The FieldLogger interface generalizes the Entry and Logger types -type FieldLogger interface { - WithField(key string, value interface{}) *Entry - WithFields(fields Fields) *Entry - WithError(err error) *Entry - - Debugf(format string, args ...interface{}) - Infof(format string, args ...interface{}) - Printf(format string, args ...interface{}) - Warnf(format string, args ...interface{}) - Warningf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) - Panicf(format string, args ...interface{}) - - Debug(args ...interface{}) - Info(args ...interface{}) - Print(args ...interface{}) - Warn(args ...interface{}) - Warning(args ...interface{}) - Error(args ...interface{}) - Fatal(args ...interface{}) - Panic(args ...interface{}) - - Debugln(args ...interface{}) - Infoln(args ...interface{}) - Println(args ...interface{}) - Warnln(args ...interface{}) - Warningln(args ...interface{}) - Errorln(args ...interface{}) - Fatalln(args ...interface{}) - Panicln(args ...interface{}) - - // IsDebugEnabled() bool - // IsInfoEnabled() bool - // IsWarnEnabled() bool - // IsErrorEnabled() bool - // IsFatalEnabled() bool - // IsPanicEnabled() bool -} - -// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is -// here for consistancy. 
Do not use. Use Logger or Entry instead. -type Ext1FieldLogger interface { - FieldLogger - Tracef(format string, args ...interface{}) - Trace(args ...interface{}) - Traceln(args ...interface{}) -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go deleted file mode 100644 index 2403de98..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine - -package logrus - -import ( - "io" -) - -func checkIfTerminal(w io.Writer) bool { - return true -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go deleted file mode 100644 index 49978998..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build darwin dragonfly freebsd netbsd openbsd -// +build !js - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TIOCGETA - -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - return err == nil -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go deleted file mode 100644 index ebdae3ec..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_js.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build js - -package logrus - -func isTerminal(fd int) bool { - return false -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go deleted file mode 100644 index 97af92c6..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build js nacl plan9 - -package logrus - -import ( - "io" -) - -func checkIfTerminal(w io.Writer) bool { - return false -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go 
b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go deleted file mode 100644 index 3293fb3c..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !appengine,!js,!windows,!nacl,!plan9 - -package logrus - -import ( - "io" - "os" -) - -func checkIfTerminal(w io.Writer) bool { - switch v := w.(type) { - case *os.File: - return isTerminal(int(v.Fd())) - default: - return false - } -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go deleted file mode 100644 index f6710b3b..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package logrus - -import ( - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermio(fd, unix.TCGETA) - return err == nil -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go deleted file mode 100644 index 04748b85..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build linux aix zos -// +build !js - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS - -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - return err == nil -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go deleted file mode 100644 index 2879eb50..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !appengine,!js,windows - -package logrus - -import ( - "io" - "os" - - "golang.org/x/sys/windows" -) - -func checkIfTerminal(w io.Writer) bool { - switch v := w.(type) { - case *os.File: - handle := 
windows.Handle(v.Fd()) - var mode uint32 - if err := windows.GetConsoleMode(handle, &mode); err != nil { - return false - } - mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING - if err := windows.SetConsoleMode(handle, mode); err != nil { - return false - } - return true - } - return false -} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go deleted file mode 100644 index be2c6efe..00000000 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,339 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "os" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "time" - "unicode/utf8" -) - -const ( - red = 31 - yellow = 33 - blue = 36 - gray = 37 -) - -var baseTimestamp time.Time - -func init() { - baseTimestamp = time.Now() -} - -// TextFormatter formats logs into text -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Force quoting of all values - ForceQuote bool - - // DisableQuote disables quoting for all values. - // DisableQuote will have a lower priority than ForceQuote. - // If both of them are set to true, quote will be forced on all values. - DisableQuote bool - - // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ - EnvironmentOverrideColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed. - // The format to use is the same than for time.Format or time.Parse from the standard - // library. - // The standard Library already provides a set of predefined format. 
- TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. - DisableSorting bool - - // The keys sorting function, when uninitialized it uses sort.Strings. - SortingFunc func([]string) - - // Disables the truncation of the level text to 4 characters. - DisableLevelTruncation bool - - // PadLevelText Adds padding the level text so that all the levels output at the same length - // PadLevelText is a superset of the DisableLevelTruncation option - PadLevelText bool - - // QuoteEmptyFields will wrap empty fields in quotes if true - QuoteEmptyFields bool - - // Whether the logger's out is to a terminal - isTerminal bool - - // FieldMap allows users to customize the names of keys for default fields. - // As an example: - // formatter := &TextFormatter{ - // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", - // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message"}} - FieldMap FieldMap - - // CallerPrettyfier can be set by the user to modify the content - // of the function and file keys in the data when ReportCaller is - // activated. If any of the returned value is the empty string the - // corresponding key will be removed from fields. 
- CallerPrettyfier func(*runtime.Frame) (function string, file string) - - terminalInitOnce sync.Once - - // The max length of the level text, generated dynamically on init - levelTextMaxLength int -} - -func (f *TextFormatter) init(entry *Entry) { - if entry.Logger != nil { - f.isTerminal = checkIfTerminal(entry.Logger.Out) - } - // Get the max length of the level text - for _, level := range AllLevels { - levelTextLength := utf8.RuneCount([]byte(level.String())) - if levelTextLength > f.levelTextMaxLength { - f.levelTextMaxLength = levelTextLength - } - } -} - -func (f *TextFormatter) isColored() bool { - isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) - - if f.EnvironmentOverrideColors { - switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { - case ok && force != "0": - isColored = true - case ok && force == "0", os.Getenv("CLICOLOR") == "0": - isColored = false - } - } - - return isColored && !f.DisableColors -} - -// Format renders a single log entry -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields) - for k, v := range entry.Data { - data[k] = v - } - prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - - var funcVal, fileVal string - - fixedKeys := make([]string, 0, 4+len(data)) - if !f.DisableTimestamp { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) - } - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) - if entry.Message != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) - } - if entry.err != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) - } - if entry.HasCaller() { - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } else { - funcVal = entry.Caller.Function - fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - } - - if funcVal != "" { - fixedKeys = 
append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) - } - if fileVal != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) - } - } - - if !f.DisableSorting { - if f.SortingFunc == nil { - sort.Strings(keys) - fixedKeys = append(fixedKeys, keys...) - } else { - if !f.isColored() { - fixedKeys = append(fixedKeys, keys...) - f.SortingFunc(fixedKeys) - } else { - f.SortingFunc(keys) - } - } - } else { - fixedKeys = append(fixedKeys, keys...) - } - - var b *bytes.Buffer - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - f.terminalInitOnce.Do(func() { f.init(entry) }) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = defaultTimestampFormat - } - if f.isColored() { - f.printColored(b, entry, keys, data, timestampFormat) - } else { - - for _, key := range fixedKeys { - var value interface{} - switch { - case key == f.FieldMap.resolve(FieldKeyTime): - value = entry.Time.Format(timestampFormat) - case key == f.FieldMap.resolve(FieldKeyLevel): - value = entry.Level.String() - case key == f.FieldMap.resolve(FieldKeyMsg): - value = entry.Message - case key == f.FieldMap.resolve(FieldKeyLogrusError): - value = entry.err - case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): - value = funcVal - case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): - value = fileVal - default: - value = data[key] - } - f.appendKeyValue(b, key, value) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel, TraceLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - case InfoLevel: - levelColor = blue - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String()) - if 
!f.DisableLevelTruncation && !f.PadLevelText { - levelText = levelText[0:4] - } - if f.PadLevelText { - // Generates the format string used in the next line, for example "%-6s" or "%-7s". - // Based on the max level text length. - formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" - // Formats the level text by appending spaces up to the max length, for example: - // - "INFO " - // - "WARNING" - levelText = fmt.Sprintf(formatString, levelText) - } - - // Remove a single newline if it already exists in the message to keep - // the behavior of logrus text_formatter the same as the stdlib log package - entry.Message = strings.TrimSuffix(entry.Message, "\n") - - caller := "" - if entry.HasCaller() { - funcVal := fmt.Sprintf("%s()", entry.Caller.Function) - fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } - - if fileVal == "" { - caller = funcVal - } else if funcVal == "" { - caller = fileVal - } else { - caller = fileVal + " " + funcVal - } - } - - switch { - case f.DisableTimestamp: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) - case !f.FullTimestamp: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) - default: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) - } - for _, k := range keys { - v := data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) - f.appendValue(b, v) - } -} - -func (f *TextFormatter) needsQuoting(text string) bool { - if f.ForceQuote { - return true - } - if f.QuoteEmptyFields && len(text) == 0 { - return true - } - if f.DisableQuote { - return false - } - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || 
ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { - return true - } - } - return false -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - if b.Len() > 0 { - b.WriteByte(' ') - } - b.WriteString(key) - b.WriteByte('=') - f.appendValue(b, value) -} - -func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { - stringVal, ok := value.(string) - if !ok { - stringVal = fmt.Sprint(value) - } - - if !f.needsQuoting(stringVal) { - b.WriteString(stringVal) - } else { - b.WriteString(fmt.Sprintf("%q", stringVal)) - } -} diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go deleted file mode 100644 index 72e8e3a1..00000000 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ /dev/null @@ -1,70 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -// Writer at INFO level. See WriterLevel for details. -func (logger *Logger) Writer() *io.PipeWriter { - return logger.WriterLevel(InfoLevel) -} - -// WriterLevel returns an io.Writer that can be used to write arbitrary text to -// the logger at the given log level. Each line written to the writer will be -// printed in the usual way using formatters and hooks. The writer is part of an -// io.Pipe and it is the callers responsibility to close the writer when done. -// This can be used to override the standard library logger easily. 
-func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { - return NewEntry(logger).WriterLevel(level) -} - -func (entry *Entry) Writer() *io.PipeWriter { - return entry.WriterLevel(InfoLevel) -} - -func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { - reader, writer := io.Pipe() - - var printFunc func(args ...interface{}) - - switch level { - case TraceLevel: - printFunc = entry.Trace - case DebugLevel: - printFunc = entry.Debug - case InfoLevel: - printFunc = entry.Info - case WarnLevel: - printFunc = entry.Warn - case ErrorLevel: - printFunc = entry.Error - case FatalLevel: - printFunc = entry.Fatal - case PanicLevel: - printFunc = entry.Panic - default: - printFunc = entry.Print - } - - go entry.writerScanner(reader, printFunc) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - printFunc(scanner.Text()) - } - if err := scanner.Err(); err != nil { - entry.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go deleted file mode 100644 index d2e98d42..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 -// and the extendable output function (XOF) BLAKE2Xb. -// -// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and -// produces digests of any size between 1 and 64 bytes. 
-// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf -// and for BLAKE2Xb see https://blake2.net/blake2x.pdf -// -// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). -// If you need a secret-key MAC (message authentication code), use the New512 -// function with a non-nil key. -// -// BLAKE2X is a construction to compute hash values larger than 64 bytes. It -// can produce hash values between 0 and 4 GiB. -package blake2b - -import ( - "encoding/binary" - "errors" - "hash" -) - -const ( - // The blocksize of BLAKE2b in bytes. - BlockSize = 128 - // The hash size of BLAKE2b-512 in bytes. - Size = 64 - // The hash size of BLAKE2b-384 in bytes. - Size384 = 48 - // The hash size of BLAKE2b-256 in bytes. - Size256 = 32 -) - -var ( - useAVX2 bool - useAVX bool - useSSE4 bool -) - -var ( - errKeySize = errors.New("blake2b: invalid key size") - errHashSize = errors.New("blake2b: invalid hash size") -) - -var iv = [8]uint64{ - 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, - 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, -} - -// Sum512 returns the BLAKE2b-512 checksum of the data. -func Sum512(data []byte) [Size]byte { - var sum [Size]byte - checkSum(&sum, Size, data) - return sum -} - -// Sum384 returns the BLAKE2b-384 checksum of the data. -func Sum384(data []byte) [Size384]byte { - var sum [Size]byte - var sum384 [Size384]byte - checkSum(&sum, Size384, data) - copy(sum384[:], sum[:Size384]) - return sum384 -} - -// Sum256 returns the BLAKE2b-256 checksum of the data. -func Sum256(data []byte) [Size256]byte { - var sum [Size]byte - var sum256 [Size256]byte - checkSum(&sum, Size256, data) - copy(sum256[:], sum[:Size256]) - return sum256 -} - -// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil -// key turns the hash into a MAC. The key must be between zero and 64 bytes long. 
-func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } - -// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil -// key turns the hash into a MAC. The key must be between zero and 64 bytes long. -func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } - -// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil -// key turns the hash into a MAC. The key must be between zero and 64 bytes long. -func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } - -// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. -// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. -// The hash size can be a value between 1 and 64 but it is highly recommended to use -// values equal or greater than: -// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). -// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). -// When the key is nil, the returned hash.Hash implements BinaryMarshaler -// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. 
-func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } - -func newDigest(hashSize int, key []byte) (*digest, error) { - if hashSize < 1 || hashSize > Size { - return nil, errHashSize - } - if len(key) > Size { - return nil, errKeySize - } - d := &digest{ - size: hashSize, - keyLen: len(key), - } - copy(d.key[:], key) - d.Reset() - return d, nil -} - -func checkSum(sum *[Size]byte, hashSize int, data []byte) { - h := iv - h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) - var c [2]uint64 - - if length := len(data); length > BlockSize { - n := length &^ (BlockSize - 1) - if length == n { - n -= BlockSize - } - hashBlocks(&h, &c, 0, data[:n]) - data = data[n:] - } - - var block [BlockSize]byte - offset := copy(block[:], data) - remaining := uint64(BlockSize - offset) - if c[0] < remaining { - c[1]-- - } - c[0] -= remaining - - hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) - - for i, v := range h[:(hashSize+7)/8] { - binary.LittleEndian.PutUint64(sum[8*i:], v) - } -} - -type digest struct { - h [8]uint64 - c [2]uint64 - size int - block [BlockSize]byte - offset int - - key [BlockSize]byte - keyLen int -} - -const ( - magic = "b2b" - marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 -) - -func (d *digest) MarshalBinary() ([]byte, error) { - if d.keyLen != 0 { - return nil, errors.New("crypto/blake2b: cannot marshal MACs") - } - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - for i := 0; i < 8; i++ { - b = appendUint64(b, d.h[i]) - } - b = appendUint64(b, d.c[0]) - b = appendUint64(b, d.c[1]) - // Maximum value for size is 64 - b = append(b, byte(d.size)) - b = append(b, d.block[:]...) 
- b = append(b, byte(d.offset)) - return b, nil -} - -func (d *digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("crypto/blake2b: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("crypto/blake2b: invalid hash state size") - } - b = b[len(magic):] - for i := 0; i < 8; i++ { - b, d.h[i] = consumeUint64(b) - } - b, d.c[0] = consumeUint64(b) - b, d.c[1] = consumeUint64(b) - d.size = int(b[0]) - b = b[1:] - copy(d.block[:], b[:BlockSize]) - b = b[BlockSize:] - d.offset = int(b[0]) - return nil -} - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Size() int { return d.size } - -func (d *digest) Reset() { - d.h = iv - d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) - d.offset, d.c[0], d.c[1] = 0, 0, 0 - if d.keyLen > 0 { - d.block = d.key - d.offset = BlockSize - } -} - -func (d *digest) Write(p []byte) (n int, err error) { - n = len(p) - - if d.offset > 0 { - remaining := BlockSize - d.offset - if n <= remaining { - d.offset += copy(d.block[d.offset:], p) - return - } - copy(d.block[d.offset:], p[:remaining]) - hashBlocks(&d.h, &d.c, 0, d.block[:]) - d.offset = 0 - p = p[remaining:] - } - - if length := len(p); length > BlockSize { - nn := length &^ (BlockSize - 1) - if length == nn { - nn -= BlockSize - } - hashBlocks(&d.h, &d.c, 0, p[:nn]) - p = p[nn:] - } - - if len(p) > 0 { - d.offset += copy(d.block[:], p) - } - - return -} - -func (d *digest) Sum(sum []byte) []byte { - var hash [Size]byte - d.finalize(&hash) - return append(sum, hash[:d.size]...) 
-} - -func (d *digest) finalize(hash *[Size]byte) { - var block [BlockSize]byte - copy(block[:], d.block[:d.offset]) - remaining := uint64(BlockSize - d.offset) - - c := d.c - if c[0] < remaining { - c[1]-- - } - c[0] -= remaining - - h := d.h - hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) - - for i, v := range h { - binary.LittleEndian.PutUint64(hash[8*i:], v) - } -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.BigEndian.PutUint64(a[:], x) - return append(b, a[:]...) -} - -func appendUint32(b []byte, x uint32) []byte { - var a [4]byte - binary.BigEndian.PutUint32(a[:], x) - return append(b, a[:]...) -} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := binary.BigEndian.Uint64(b) - return b[8:], x -} - -func consumeUint32(b []byte) ([]byte, uint32) { - x := binary.BigEndian.Uint32(b) - return b[4:], x -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go deleted file mode 100644 index 56bfaaa1..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.7 && amd64 && gc && !purego -// +build go1.7,amd64,gc,!purego - -package blake2b - -import "golang.org/x/sys/cpu" - -func init() { - useAVX2 = cpu.X86.HasAVX2 - useAVX = cpu.X86.HasAVX - useSSE4 = cpu.X86.HasSSE41 -} - -//go:noescape -func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -//go:noescape -func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -//go:noescape -func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - switch { - case useAVX2: - hashBlocksAVX2(h, c, flag, blocks) - case useAVX: - hashBlocksAVX(h, c, flag, blocks) - case useSSE4: - hashBlocksSSE4(h, c, flag, blocks) - default: - hashBlocksGeneric(h, c, flag, blocks) - } -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s deleted file mode 100644 index 4b9daa18..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ /dev/null @@ -1,745 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.7 && amd64 && gc && !purego -// +build go1.7,amd64,gc,!purego - -#include "textflag.h" - -DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 - -#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; 
BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 -#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 -#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e -#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 -#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 - -#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ - VPADDQ m0, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m1, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y1_Y1; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y3_Y3; \ - VPADDQ m2, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m3, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y3_Y3; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y1_Y1 - -#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E -#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 -#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E -#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 -#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E - -#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n -#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n -#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n -#define VMOVQ_SI_X14(n) BYTE 
$0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n -#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n - -#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 -#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 -#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 -#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 -#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 - -#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 - -#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 -#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 - -// load msg: Y12 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y12, Y12 - -// load msg: Y13 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ - VMOVQ_SI_X13(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X13(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y13, Y13 - -// load msg: Y14 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ - 
VMOVQ_SI_X14(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X14(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y14, Y14 - -// load msg: Y15 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ - VMOVQ_SI_X15(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X15(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X11(6*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ - LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ - LOAD_MSG_AVX2_Y15(9, 11, 13, 15) - -#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ - LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ - LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ - VMOVQ_SI_X11(11*8); \ - VPSHUFD $0x4E, 0*8(SI), X14; \ - VPINSRQ_1_SI_X11(5*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(12, 2, 7, 3) - -#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ - VMOVQ_SI_X11(5*8); \ - VMOVDQU 11*8(SI), X12; \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - VMOVQ_SI_X13(8*8); \ - VMOVQ_SI_X11(2*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X11(13*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ - LOAD_MSG_AVX2_Y15(14, 6, 1, 4) - -#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ - LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ - LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ - LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ - VMOVQ_SI_X15(6*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X15(10*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ - LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X13(7*8); \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ - LOAD_MSG_AVX2_Y15(1, 12, 8, 13) - -#define 
LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ - LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ - LOAD_MSG_AVX2_Y15(13, 5, 14, 9) - -#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ - LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ - LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ - VMOVQ_SI_X14_0; \ - VPSHUFD $0x4E, 8*8(SI), X11; \ - VPINSRQ_1_SI_X14(6*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(7, 3, 2, 11) - -#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ - LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ - LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ - LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ - VMOVQ_SI_X15_0; \ - VMOVQ_SI_X11(6*8); \ - VPINSRQ_1_SI_X15(4*8); \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ - VMOVQ_SI_X12(6*8); \ - VMOVQ_SI_X11(11*8); \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ - VMOVQ_SI_X11(1*8); \ - VMOVDQU 12*8(SI), X14; \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - VMOVQ_SI_X15(2*8); \ - VMOVDQU 4*8(SI), X11; \ - VPINSRQ_1_SI_X15(7*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ - LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ - VMOVQ_SI_X13(2*8); \ - VPSHUFD $0x4E, 5*8(SI), X11; \ - VPINSRQ_1_SI_X13(4*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ - VMOVQ_SI_X15(11*8); \ - VMOVQ_SI_X11(12*8); \ - VPINSRQ_1_SI_X15(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y15, Y15 - -// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ 
blocks_len+32(FP), DI - - MOVQ SP, DX - ADDQ $31, DX - ANDQ $~31, DX - - MOVQ CX, 16(DX) - XORQ CX, CX - MOVQ CX, 24(DX) - - VMOVDQU ·AVX2_c40<>(SB), Y4 - VMOVDQU ·AVX2_c48<>(SB), Y5 - - VMOVDQU 0(AX), Y8 - VMOVDQU 32(AX), Y9 - VMOVDQU ·AVX2_iv0<>(SB), Y6 - VMOVDQU ·AVX2_iv1<>(SB), Y7 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - MOVQ R9, 8(DX) - -loop: - ADDQ $128, R8 - MOVQ R8, 0(DX) - CMPQ R8, $128 - JGE noinc - INCQ R9 - MOVQ R9, 8(DX) - -noinc: - VMOVDQA Y8, Y0 - VMOVDQA Y9, Y1 - VMOVDQA Y6, Y2 - VPXOR 0(DX), Y7, Y3 - - LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() - VMOVDQA Y12, 32(DX) - VMOVDQA Y13, 64(DX) - VMOVDQA Y14, 96(DX) - VMOVDQA Y15, 128(DX) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() - VMOVDQA Y12, 160(DX) - VMOVDQA Y13, 192(DX) - VMOVDQA Y14, 224(DX) - VMOVDQA Y15, 256(DX) - - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - - ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) - ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) - - VPXOR Y0, Y8, Y8 - VPXOR Y1, Y9, Y9 - VPXOR Y2, Y8, Y8 - VPXOR Y3, Y9, Y9 - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVQ R8, 0(BX) - MOVQ 
R9, 8(BX) - - VMOVDQU Y8, 0(AX) - VMOVDQU Y9, 32(AX) - VZEROUPPER - - RET - -#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA -#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB -#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF -#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD -#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE - -#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF -#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF - -#define SHUFFLE_AVX() \ - VMOVDQA X6, X13; \ - VMOVDQA X2, X14; \ - VMOVDQA X4, X6; \ - VPUNPCKLQDQ_X13_X13_X15; \ - VMOVDQA X5, X4; \ - VMOVDQA X6, X5; \ - VPUNPCKHQDQ_X15_X7_X6; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X13_X7; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VPUNPCKHQDQ_X15_X2_X2; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X3_X3; \ - -#define SHUFFLE_AVX_INV() \ - VMOVDQA X2, X13; \ - VMOVDQA X4, X14; \ - VPUNPCKLQDQ_X2_X2_X15; \ - VMOVDQA X5, X4; \ - VPUNPCKHQDQ_X15_X3_X2; \ - VMOVDQA X14, X5; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VMOVDQA X6, X14; \ - VPUNPCKHQDQ_X15_X13_X3; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X6_X6; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X7_X7; \ - -#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, 
c40, c48) \ - VPADDQ m0, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m1, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFD $-79, v6, v6; \ - VPSHUFD $-79, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPSHUFB c40, v2, v2; \ - VPSHUFB c40, v3, v3; \ - VPADDQ m2, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m3, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFB c48, v6, v6; \ - VPSHUFB c48, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPADDQ v2, v2, t0; \ - VPSRLQ $63, v2, v2; \ - VPXOR t0, v2, v2; \ - VPADDQ v3, v3, t0; \ - VPSRLQ $63, v3, v3; \ - VPXOR t0, v3, v3 - -// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) -// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 -#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X13(i2*8); \ - VMOVQ_SI_X14(i4*8); \ - VMOVQ_SI_X15(i6*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X13(i3*8); \ - VPINSRQ_1_SI_X14(i5*8); \ - VPINSRQ_1_SI_X15(i7*8) - -// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) -#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(1*8); \ - VMOVQ_SI_X15(5*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X13(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(7*8) - -// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) -#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ - VPSHUFD $0x4E, 0*8(SI), X12; \ - VMOVQ_SI_X13(11*8); \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(7*8); \ - VPINSRQ_1_SI_X13(5*8); \ - VPINSRQ_1_SI_X14(2*8); \ - VPINSRQ_1_SI_X15(3*8) - -// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) -#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ - VMOVDQU 11*8(SI), X12; \ - VMOVQ_SI_X13(5*8); \ - VMOVQ_SI_X14(8*8); \ - VMOVQ_SI_X15(2*8); \ - VPINSRQ_1_SI_X13(15*8); \ - 
VPINSRQ_1_SI_X14_0; \ - VPINSRQ_1_SI_X15(13*8) - -// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) -#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(6*8); \ - VMOVQ_SI_X15_0; \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(15*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(8*8) - -// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) -#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ - VMOVQ_SI_X12(9*8); \ - VMOVQ_SI_X13(2*8); \ - VMOVQ_SI_X14_0; \ - VMOVQ_SI_X15(4*8); \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VPINSRQ_1_SI_X15(15*8) - -// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) -#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(11*8); \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X13(8*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(3*8) - -// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) -#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ - MOVQ 0*8(SI), X12; \ - VPSHUFD $0x4E, 8*8(SI), X13; \ - MOVQ 7*8(SI), X14; \ - MOVQ 2*8(SI), X15; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(11*8) - -// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) -#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ - MOVQ 6*8(SI), X12; \ - MOVQ 11*8(SI), X13; \ - MOVQ 15*8(SI), X14; \ - MOVQ 3*8(SI), X15; \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X14(9*8); \ - VPINSRQ_1_SI_X15(8*8) - -// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) -#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ - MOVQ 5*8(SI), X12; \ - MOVQ 8*8(SI), X13; \ - MOVQ 0*8(SI), X14; \ - MOVQ 6*8(SI), X15; \ - VPINSRQ_1_SI_X12(15*8); \ - VPINSRQ_1_SI_X13(2*8); \ - VPINSRQ_1_SI_X14(4*8); \ - VPINSRQ_1_SI_X15(10*8) - -// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) -#define 
LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ - VMOVDQU 12*8(SI), X12; \ - MOVQ 1*8(SI), X13; \ - MOVQ 2*8(SI), X14; \ - VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VMOVDQU 4*8(SI), X15 - -// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) -#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ - MOVQ 15*8(SI), X12; \ - MOVQ 3*8(SI), X13; \ - MOVQ 11*8(SI), X14; \ - MOVQ 12*8(SI), X15; \ - VPINSRQ_1_SI_X12(9*8); \ - VPINSRQ_1_SI_X13(13*8); \ - VPINSRQ_1_SI_X14(14*8); \ - VPINSRQ_1_SI_X15_0 - -// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - VMOVDQU ·AVX_c40<>(SB), X0 - VMOVDQU ·AVX_c48<>(SB), X1 - VMOVDQA X0, X8 - VMOVDQA X1, X9 - - VMOVDQU ·AVX_iv3<>(SB), X0 - VMOVDQA X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) - - VMOVDQU 0(AX), X10 - VMOVDQU 16(AX), X11 - VMOVDQU 32(AX), X2 - VMOVDQU 48(AX), X3 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - -loop: - ADDQ $128, R8 - CMPQ R8, $128 - JGE noinc - INCQ R9 - -noinc: - VMOVQ_R8_X15 - VPINSRQ_1_R9_X15 - - VMOVDQA X10, X0 - VMOVDQA X11, X1 - VMOVDQU ·AVX_iv0<>(SB), X4 - VMOVDQU ·AVX_iv1<>(SB), X5 - VMOVDQU ·AVX_iv2<>(SB), X6 - - VPXOR X15, X6, X6 - VMOVDQA 0(R10), X7 - - LOAD_MSG_AVX_0_2_4_6_1_3_5_7() - VMOVDQA X12, 16(R10) - VMOVDQA X13, 32(R10) - VMOVDQA X14, 48(R10) - VMOVDQA X15, 64(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) - VMOVDQA X12, 80(R10) - VMOVDQA X13, 96(R10) - VMOVDQA X14, 112(R10) - VMOVDQA X15, 128(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) - VMOVDQA X12, 144(R10) - VMOVDQA X13, 160(R10) - 
VMOVDQA X14, 176(R10) - VMOVDQA X15, 192(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_1_0_11_5_12_2_7_3() - VMOVDQA X12, 208(R10) - VMOVDQA X13, 224(R10) - VMOVDQA X14, 240(R10) - VMOVDQA X15, 256(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_11_12_5_15_8_0_2_13() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_2_5_4_15_6_10_0_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_9_5_2_10_0_7_4_15() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_2_6_0_8_12_10_11_3() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_0_6_9_8_7_3_2_11() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_5_15_8_2_0_4_6_10() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, 
X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_6_14_11_0_15_9_3_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_12_13_1_10_2_7_4_5() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_15_9_3_13_11_14_12_0() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - - VMOVDQU 32(AX), X14 - VMOVDQU 48(AX), X15 - VPXOR X0, X10, X10 - VPXOR X1, X11, X11 - VPXOR X2, X14, X14 - VPXOR X3, X15, X15 - VPXOR X4, X10, X10 - VPXOR X5, X11, X11 - VPXOR X6, X14, X2 - VPXOR X7, X15, X3 - VMOVDQU X2, 32(AX) - VMOVDQU X3, 48(AX) - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - VMOVDQU X10, 0(AX) - VMOVDQU X11, 16(AX) - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - VZEROUPPER - - RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go deleted file mode 100644 index 5fa1b328..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.7 && amd64 && gc && !purego -// +build !go1.7,amd64,gc,!purego - -package blake2b - -import "golang.org/x/sys/cpu" - -func init() { - useSSE4 = cpu.X86.HasSSE41 -} - -//go:noescape -func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - if useSSE4 { - hashBlocksSSE4(h, c, flag, blocks) - } else { - hashBlocksGeneric(h, c, flag, blocks) - } -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s deleted file mode 100644 index ae75eb9a..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -#include "textflag.h" - -DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, 
t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ - PADDQ m0, v0; \ - PADDQ m1, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v6, v6; \ - PSHUFD $0xB1, v7, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - PSHUFB c40, v2; \ - PSHUFB c40, v3; \ - PADDQ m2, v0; \ - PADDQ m3, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFB c48, v6; \ - PSHUFB c48, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - MOVOU v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVOU v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ - MOVQ i0*8(src), m0; \ - PINSRQ $1, i1*8(src), m0; \ - MOVQ i2*8(src), m1; \ - PINSRQ $1, i3*8(src), m1; \ - MOVQ i4*8(src), m2; \ - PINSRQ $1, i5*8(src), m2; \ - MOVQ i6*8(src), m3; \ - PINSRQ $1, i7*8(src), m3 - -// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - MOVOU ·iv3<>(SB), X0 - MOVO X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) - - MOVOU ·c40<>(SB), X13 - MOVOU ·c48<>(SB), X14 - - MOVOU 0(AX), X12 - MOVOU 16(AX), X15 - - MOVQ 
0(BX), R8 - MOVQ 8(BX), R9 - -loop: - ADDQ $128, R8 - CMPQ R8, $128 - JGE noinc - INCQ R9 - -noinc: - MOVQ R8, X8 - PINSRQ $1, R9, X8 - - MOVO X12, X0 - MOVO X15, X1 - MOVOU 32(AX), X2 - MOVOU 48(AX), X3 - MOVOU ·iv0<>(SB), X4 - MOVOU ·iv1<>(SB), X5 - MOVOU ·iv2<>(SB), X6 - - PXOR X8, X6 - MOVO 0(R10), X7 - - LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) - MOVO X8, 16(R10) - MOVO X9, 32(R10) - MOVO X10, 48(R10) - MOVO X11, 64(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) - MOVO X8, 80(R10) - MOVO X9, 96(R10) - MOVO X10, 112(R10) - MOVO X11, 128(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) - MOVO X8, 144(R10) - MOVO X9, 160(R10) - MOVO X10, 176(R10) - MOVO X11, 192(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) - MOVO X8, 208(R10) - MOVO X9, 224(R10) - MOVO X10, 240(R10) - MOVO X11, 256(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) - HALF_ROUND(X0, X1, X2, X3, 
X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) - 
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - MOVOU 32(AX), X10 - MOVOU 48(AX), X11 - PXOR X0, X12 - PXOR X1, X15 - PXOR X2, X10 - PXOR X3, X11 - PXOR X4, X12 - PXOR X5, X15 - PXOR X6, X10 - PXOR X7, X11 - MOVOU X10, 32(AX) - MOVOU X11, 48(AX) - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVOU X12, 0(AX) - MOVOU X15, 16(AX) - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go deleted file mode 100644 index 3168a8aa..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blake2b - -import ( - "encoding/binary" - "math/bits" -) - -// the precomputed values for BLAKE2b -// there are 12 16-byte arrays - one for each round -// the entries are calculated from the sigma constants. 
-var precomputed = [12][16]byte{ - {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, - {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, - {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, - {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, - {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, - {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, - {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, - {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, - {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, - {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, - {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first - {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second -} - -func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - var m [16]uint64 - c0, c1 := c[0], c[1] - - for i := 0; i < len(blocks); { - c0 += BlockSize - if c0 < BlockSize { - c1++ - } - - v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] - v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] - v12 ^= c0 - v13 ^= c1 - v14 ^= flag - - for j := range m { - m[j] = binary.LittleEndian.Uint64(blocks[i:]) - i += 8 - } - - for j := range precomputed { - s := &(precomputed[j]) - - v0 += m[s[0]] - v0 += v4 - v12 ^= v0 - v12 = bits.RotateLeft64(v12, -32) - v8 += v12 - v4 ^= v8 - v4 = bits.RotateLeft64(v4, -24) - v1 += m[s[1]] - v1 += v5 - v13 ^= v1 - v13 = bits.RotateLeft64(v13, -32) - v9 += v13 - v5 ^= v9 - v5 = bits.RotateLeft64(v5, -24) - v2 += m[s[2]] - v2 += v6 - v14 ^= v2 - v14 = bits.RotateLeft64(v14, -32) - v10 += v14 - v6 ^= v10 - v6 = bits.RotateLeft64(v6, -24) - v3 += m[s[3]] - v3 += v7 - v15 ^= v3 - v15 = bits.RotateLeft64(v15, -32) - v11 += v15 - v7 ^= v11 - v7 = bits.RotateLeft64(v7, -24) - - v0 += m[s[4]] - v0 += v4 - v12 ^= v0 - v12 = bits.RotateLeft64(v12, -16) - v8 += v12 - v4 ^= v8 - v4 = 
bits.RotateLeft64(v4, -63) - v1 += m[s[5]] - v1 += v5 - v13 ^= v1 - v13 = bits.RotateLeft64(v13, -16) - v9 += v13 - v5 ^= v9 - v5 = bits.RotateLeft64(v5, -63) - v2 += m[s[6]] - v2 += v6 - v14 ^= v2 - v14 = bits.RotateLeft64(v14, -16) - v10 += v14 - v6 ^= v10 - v6 = bits.RotateLeft64(v6, -63) - v3 += m[s[7]] - v3 += v7 - v15 ^= v3 - v15 = bits.RotateLeft64(v15, -16) - v11 += v15 - v7 ^= v11 - v7 = bits.RotateLeft64(v7, -63) - - v0 += m[s[8]] - v0 += v5 - v15 ^= v0 - v15 = bits.RotateLeft64(v15, -32) - v10 += v15 - v5 ^= v10 - v5 = bits.RotateLeft64(v5, -24) - v1 += m[s[9]] - v1 += v6 - v12 ^= v1 - v12 = bits.RotateLeft64(v12, -32) - v11 += v12 - v6 ^= v11 - v6 = bits.RotateLeft64(v6, -24) - v2 += m[s[10]] - v2 += v7 - v13 ^= v2 - v13 = bits.RotateLeft64(v13, -32) - v8 += v13 - v7 ^= v8 - v7 = bits.RotateLeft64(v7, -24) - v3 += m[s[11]] - v3 += v4 - v14 ^= v3 - v14 = bits.RotateLeft64(v14, -32) - v9 += v14 - v4 ^= v9 - v4 = bits.RotateLeft64(v4, -24) - - v0 += m[s[12]] - v0 += v5 - v15 ^= v0 - v15 = bits.RotateLeft64(v15, -16) - v10 += v15 - v5 ^= v10 - v5 = bits.RotateLeft64(v5, -63) - v1 += m[s[13]] - v1 += v6 - v12 ^= v1 - v12 = bits.RotateLeft64(v12, -16) - v11 += v12 - v6 ^= v11 - v6 = bits.RotateLeft64(v6, -63) - v2 += m[s[14]] - v2 += v7 - v13 ^= v2 - v13 = bits.RotateLeft64(v13, -16) - v8 += v13 - v7 ^= v8 - v7 = bits.RotateLeft64(v7, -63) - v3 += m[s[15]] - v3 += v4 - v14 ^= v3 - v14 = bits.RotateLeft64(v14, -16) - v9 += v14 - v4 ^= v9 - v4 = bits.RotateLeft64(v4, -63) - - } - - h[0] ^= v0 ^ v8 - h[1] ^= v1 ^ v9 - h[2] ^= v2 ^ v10 - h[3] ^= v3 ^ v11 - h[4] ^= v4 ^ v12 - h[5] ^= v5 ^ v13 - h[6] ^= v6 ^ v14 - h[7] ^= v7 ^ v15 - } - c[0], c[1] = c0, c1 -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go deleted file mode 100644 index b0137cdf..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || purego || !gc -// +build !amd64 purego !gc - -package blake2b - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - hashBlocksGeneric(h, c, flag, blocks) -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go deleted file mode 100644 index 52c414db..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2x.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blake2b - -import ( - "encoding/binary" - "errors" - "io" -) - -// XOF defines the interface to hash functions that -// support arbitrary-length output. -type XOF interface { - // Write absorbs more data into the hash's state. It panics if called - // after Read. - io.Writer - - // Read reads more output from the hash. It returns io.EOF if the limit - // has been reached. - io.Reader - - // Clone returns a copy of the XOF in its current state. - Clone() XOF - - // Reset resets the XOF to its initial state. - Reset() -} - -// OutputLengthUnknown can be used as the size argument to NewXOF to indicate -// the length of the output is not known in advance. -const OutputLengthUnknown = 0 - -// magicUnknownOutputLength is a magic value for the output size that indicates -// an unknown number of output bytes. -const magicUnknownOutputLength = (1 << 32) - 1 - -// maxOutputLength is the absolute maximum number of bytes to produce when the -// number of output bytes is unknown. -const maxOutputLength = (1 << 32) * 64 - -// NewXOF creates a new variable-output-length hash. The hash either produce a -// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes -// (size == OutputLengthUnknown). 
In the latter case, an absolute limit of -// 256GiB applies. -// -// A non-nil key turns the hash into a MAC. The key must between -// zero and 32 bytes long. -func NewXOF(size uint32, key []byte) (XOF, error) { - if len(key) > Size { - return nil, errKeySize - } - if size == magicUnknownOutputLength { - // 2^32-1 indicates an unknown number of bytes and thus isn't a - // valid length. - return nil, errors.New("blake2b: XOF length too large") - } - if size == OutputLengthUnknown { - size = magicUnknownOutputLength - } - x := &xof{ - d: digest{ - size: Size, - keyLen: len(key), - }, - length: size, - } - copy(x.d.key[:], key) - x.Reset() - return x, nil -} - -type xof struct { - d digest - length uint32 - remaining uint64 - cfg, root, block [Size]byte - offset int - nodeOffset uint32 - readMode bool -} - -func (x *xof) Write(p []byte) (n int, err error) { - if x.readMode { - panic("blake2b: write to XOF after read") - } - return x.d.Write(p) -} - -func (x *xof) Clone() XOF { - clone := *x - return &clone -} - -func (x *xof) Reset() { - x.cfg[0] = byte(Size) - binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length - binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length - x.cfg[17] = byte(Size) // inner hash size - - x.d.Reset() - x.d.h[1] ^= uint64(x.length) << 32 - - x.remaining = uint64(x.length) - if x.remaining == magicUnknownOutputLength { - x.remaining = maxOutputLength - } - x.offset, x.nodeOffset = 0, 0 - x.readMode = false -} - -func (x *xof) Read(p []byte) (n int, err error) { - if !x.readMode { - x.d.finalize(&x.root) - x.readMode = true - } - - if x.remaining == 0 { - return 0, io.EOF - } - - n = len(p) - if uint64(n) > x.remaining { - n = int(x.remaining) - p = p[:n] - } - - if x.offset > 0 { - blockRemaining := Size - x.offset - if n < blockRemaining { - x.offset += copy(p, x.block[x.offset:]) - x.remaining -= uint64(n) - return - } - copy(p, x.block[x.offset:]) - p = p[blockRemaining:] - x.offset = 0 - x.remaining -= 
uint64(blockRemaining) - } - - for len(p) >= Size { - binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) - x.nodeOffset++ - - x.d.initConfig(&x.cfg) - x.d.Write(x.root[:]) - x.d.finalize(&x.block) - - copy(p, x.block[:]) - p = p[Size:] - x.remaining -= uint64(Size) - } - - if todo := len(p); todo > 0 { - if x.remaining < uint64(Size) { - x.cfg[0] = byte(x.remaining) - } - binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) - x.nodeOffset++ - - x.d.initConfig(&x.cfg) - x.d.Write(x.root[:]) - x.d.finalize(&x.block) - - x.offset = copy(p, x.block[:todo]) - x.remaining -= uint64(todo) - } - return -} - -func (d *digest) initConfig(cfg *[Size]byte) { - d.offset, d.c[0], d.c[1] = 0, 0, 0 - for i := range d.h { - d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) - } -} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go deleted file mode 100644 index 9d863396..00000000 --- a/vendor/golang.org/x/crypto/blake2b/register.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 -// +build go1.9 - -package blake2b - -import ( - "crypto" - "hash" -) - -func init() { - newHash256 := func() hash.Hash { - h, _ := New256(nil) - return h - } - newHash384 := func() hash.Hash { - h, _ := New384(nil) - return h - } - - newHash512 := func() hash.Hash { - h, _ := New512(nil) - return h - } - - crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) - crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) - crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go deleted file mode 100644 index 00f963ea..00000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package curve25519 provides an implementation of the X25519 function, which -// performs scalar multiplication on the elliptic curve known as Curve25519. -// See RFC 7748. -// -// Starting in Go 1.20, this package is a wrapper for the X25519 implementation -// in the crypto/ecdh package. -package curve25519 // import "golang.org/x/crypto/curve25519" - -// ScalarMult sets dst to the product scalar * point. -// -// Deprecated: when provided a low-order point, ScalarMult will set dst to all -// zeroes, irrespective of the scalar. Instead, use the X25519 function, which -// will return an error. -func ScalarMult(dst, scalar, point *[32]byte) { - scalarMult(dst, scalar, point) -} - -// ScalarBaseMult sets dst to the product scalar * base where base is the -// standard generator. -// -// It is recommended to use the X25519 function with Basepoint instead, as -// copying into fixed size arrays can lead to unexpected bugs. -func ScalarBaseMult(dst, scalar *[32]byte) { - scalarBaseMult(dst, scalar) -} - -const ( - // ScalarSize is the size of the scalar input to X25519. - ScalarSize = 32 - // PointSize is the size of the point input to X25519. - PointSize = 32 -) - -// Basepoint is the canonical Curve25519 generator. -var Basepoint []byte - -var basePoint = [32]byte{9} - -func init() { Basepoint = basePoint[:] } - -// X25519 returns the result of the scalar multiplication (scalar * point), -// according to RFC 7748, Section 5. scalar, point and the return value are -// slices of 32 bytes. -// -// scalar can be generated at random, for example with crypto/rand. point should -// be either Basepoint or the output of another X25519 call. -// -// If point is Basepoint (but not if it's a different slice with the same -// contents) a precomputed implementation might be used for performance. 
-func X25519(scalar, point []byte) ([]byte, error) { - // Outline the body of function, to let the allocation be inlined in the - // caller, and possibly avoid escaping to the heap. - var dst [32]byte - return x25519(&dst, scalar, point) -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go b/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go deleted file mode 100644 index ba647e8d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.20 - -package curve25519 - -import ( - "crypto/subtle" - "errors" - "strconv" - - "golang.org/x/crypto/curve25519/internal/field" -) - -func scalarMult(dst, scalar, point *[32]byte) { - var e [32]byte - - copy(e[:], scalar[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element - x1.SetBytes(point[:]) - x2.One() - x3.Set(&x1) - z3.One() - - swap := 0 - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int(b) - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - swap = int(b) - - tmp0.Subtract(&x3, &z3) - tmp1.Subtract(&x2, &z2) - x2.Add(&x2, &z2) - z2.Add(&x3, &z3) - z3.Multiply(&tmp0, &x2) - z2.Multiply(&z2, &tmp1) - tmp0.Square(&tmp1) - tmp1.Square(&x2) - x3.Add(&z3, &z2) - z2.Subtract(&z3, &z2) - x2.Multiply(&tmp1, &tmp0) - tmp1.Subtract(&tmp1, &tmp0) - z2.Square(&z2) - - z3.Mult32(&tmp1, 121666) - x3.Square(&x3) - tmp0.Add(&tmp0, &z3) - z3.Multiply(&x1, &z2) - z2.Multiply(&tmp1, &tmp0) - } - - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - - z2.Invert(&z2) - x2.Multiply(&x2, &z2) - copy(dst[:], x2.Bytes()) -} - -func scalarBaseMult(dst, scalar *[32]byte) { - checkBasepoint() - scalarMult(dst, scalar, &basePoint) -} - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - var in [32]byte - if l := 
len(scalar); l != 32 { - return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32") - } - if l := len(point); l != 32 { - return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32") - } - copy(in[:], scalar) - if &point[0] == &Basepoint[0] { - scalarBaseMult(dst, &in) - } else { - var base, zero [32]byte - copy(base[:], point) - scalarMult(dst, &in, &base) - if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { - return nil, errors.New("bad input point: low order point") - } - } - return dst[:], nil -} - -func checkBasepoint() { - if subtle.ConstantTimeCompare(Basepoint, []byte{ - 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }) != 1 { - panic("curve25519: global Basepoint value was modified") - } -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go b/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go deleted file mode 100644 index 627df497..00000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.20 - -package curve25519 - -import "crypto/ecdh" - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - curve := ecdh.X25519() - pub, err := curve.NewPublicKey(point) - if err != nil { - return nil, err - } - priv, err := curve.NewPrivateKey(scalar) - if err != nil { - return nil, err - } - out, err := priv.ECDH(pub) - if err != nil { - return nil, err - } - copy(dst[:], out) - return dst[:], nil -} - -func scalarMult(dst, scalar, point *[32]byte) { - if _, err := x25519(dst, scalar[:], point[:]); err != nil { - // The only error condition for x25519 when the inputs are 32 bytes long - // is if the output would have been the all-zero value. - for i := range dst { - dst[i] = 0 - } - } -} - -func scalarBaseMult(dst, scalar *[32]byte) { - curve := ecdh.X25519() - priv, err := curve.NewPrivateKey(scalar[:]) - if err != nil { - panic("curve25519: internal error: scalarBaseMult was not 32 bytes") - } - copy(dst[:], priv.PublicKey().Bytes()) -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/README b/vendor/golang.org/x/crypto/curve25519/internal/field/README deleted file mode 100644 index e25bca7d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/README +++ /dev/null @@ -1,7 +0,0 @@ -This package is kept in sync with crypto/ed25519/internal/edwards25519/field in -the standard library. - -If there are any changes in the standard library that need to be synced to this -package, run sync.sh. It will not overwrite any local changes made since the -previous sync, so it's ok to land changes in this package first, and then sync -to the standard library later. diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go deleted file mode 100644 index ca841ad9..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package field implements fast arithmetic modulo 2^255-19. -package field - -import ( - "crypto/subtle" - "encoding/binary" - "math/bits" -) - -// Element represents an element of the field GF(2^255-19). Note that this -// is not a cryptographically secure group, and should only be used to interact -// with edwards25519.Point coordinates. -// -// This type works similarly to math/big.Int, and all arguments and receivers -// are allowed to alias. -// -// The zero value is a valid zero element. -type Element struct { - // An element t represents the integer - // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 - // - // Between operations, all limbs are expected to be lower than 2^52. - l0 uint64 - l1 uint64 - l2 uint64 - l3 uint64 - l4 uint64 -} - -const maskLow51Bits uint64 = (1 << 51) - 1 - -var feZero = &Element{0, 0, 0, 0, 0} - -// Zero sets v = 0, and returns v. -func (v *Element) Zero() *Element { - *v = *feZero - return v -} - -var feOne = &Element{1, 0, 0, 0, 0} - -// One sets v = 1, and returns v. -func (v *Element) One() *Element { - *v = *feOne - return v -} - -// reduce reduces v modulo 2^255 - 19 and returns it. -func (v *Element) reduce() *Element { - v.carryPropagate() - - // After the light reduction we now have a field element representation - // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. - - // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, - // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. - c := (v.l0 + 19) >> 51 - c = (v.l1 + c) >> 51 - c = (v.l2 + c) >> 51 - c = (v.l3 + c) >> 51 - c = (v.l4 + c) >> 51 - - // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's - // effectively applying the reduction identity to the carry. 
- v.l0 += 19 * c - - v.l1 += v.l0 >> 51 - v.l0 = v.l0 & maskLow51Bits - v.l2 += v.l1 >> 51 - v.l1 = v.l1 & maskLow51Bits - v.l3 += v.l2 >> 51 - v.l2 = v.l2 & maskLow51Bits - v.l4 += v.l3 >> 51 - v.l3 = v.l3 & maskLow51Bits - // no additional carry - v.l4 = v.l4 & maskLow51Bits - - return v -} - -// Add sets v = a + b, and returns v. -func (v *Element) Add(a, b *Element) *Element { - v.l0 = a.l0 + b.l0 - v.l1 = a.l1 + b.l1 - v.l2 = a.l2 + b.l2 - v.l3 = a.l3 + b.l3 - v.l4 = a.l4 + b.l4 - // Using the generic implementation here is actually faster than the - // assembly. Probably because the body of this function is so simple that - // the compiler can figure out better optimizations by inlining the carry - // propagation. TODO - return v.carryPropagateGeneric() -} - -// Subtract sets v = a - b, and returns v. -func (v *Element) Subtract(a, b *Element) *Element { - // We first add 2 * p, to guarantee the subtraction won't underflow, and - // then subtract b (which can be up to 2^255 + 2^13 * 19). - v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 - v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 - v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 - v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 - v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 - return v.carryPropagate() -} - -// Negate sets v = -a, and returns v. -func (v *Element) Negate(a *Element) *Element { - return v.Subtract(feZero, a) -} - -// Invert sets v = 1/z mod p, and returns v. -// -// If z == 0, Invert returns v = 0. -func (v *Element) Invert(z *Element) *Element { - // Inversion is implemented as exponentiation with exponent p − 2. It uses the - // same sequence of 255 squarings and 11 multiplications as [Curve25519]. 
- var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element - - z2.Square(z) // 2 - t.Square(&z2) // 4 - t.Square(&t) // 8 - z9.Multiply(&t, z) // 9 - z11.Multiply(&z9, &z2) // 11 - t.Square(&z11) // 22 - z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 - - t.Square(&z2_5_0) // 2^6 - 2^1 - for i := 0; i < 4; i++ { - t.Square(&t) // 2^10 - 2^5 - } - z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 - - t.Square(&z2_10_0) // 2^11 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^20 - 2^10 - } - z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 - - t.Square(&z2_20_0) // 2^21 - 2^1 - for i := 0; i < 19; i++ { - t.Square(&t) // 2^40 - 2^20 - } - t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 - - t.Square(&t) // 2^41 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^50 - 2^10 - } - z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 - - t.Square(&z2_50_0) // 2^51 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^100 - 2^50 - } - z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 - - t.Square(&z2_100_0) // 2^101 - 2^1 - for i := 0; i < 99; i++ { - t.Square(&t) // 2^200 - 2^100 - } - t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 - - t.Square(&t) // 2^201 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^250 - 2^50 - } - t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 - - t.Square(&t) // 2^251 - 2^1 - t.Square(&t) // 2^252 - 2^2 - t.Square(&t) // 2^253 - 2^3 - t.Square(&t) // 2^254 - 2^4 - t.Square(&t) // 2^255 - 2^5 - - return v.Multiply(&t, &z11) // 2^255 - 21 -} - -// Set sets v = a, and returns v. -func (v *Element) Set(a *Element) *Element { - *v = *a - return v -} - -// SetBytes sets v to x, which must be a 32-byte little-endian encoding. -// -// Consistent with RFC 7748, the most significant bit (the high bit of the -// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1) -// are accepted. Note that this is laxer than specified by RFC 8032. 
-func (v *Element) SetBytes(x []byte) *Element { - if len(x) != 32 { - panic("edwards25519: invalid field element input size") - } - - // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51). - v.l0 = binary.LittleEndian.Uint64(x[0:8]) - v.l0 &= maskLow51Bits - // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51). - v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3 - v.l1 &= maskLow51Bits - // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51). - v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6 - v.l2 &= maskLow51Bits - // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51). - v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1 - v.l3 &= maskLow51Bits - // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51). - // Note: not bytes 25:33, shift 4, to avoid overread. - v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12 - v.l4 &= maskLow51Bits - - return v -} - -// Bytes returns the canonical 32-byte little-endian encoding of v. -func (v *Element) Bytes() []byte { - // This function is outlined to make the allocations inline in the caller - // rather than happen on the heap. - var out [32]byte - return v.bytes(&out) -} - -func (v *Element) bytes(out *[32]byte) []byte { - t := *v - t.reduce() - - var buf [8]byte - for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} { - bitsOffset := i * 51 - binary.LittleEndian.PutUint64(buf[:], l<= len(out) { - break - } - out[off] |= bb - } - } - - return out[:] -} - -// Equal returns 1 if v and u are equal, and 0 otherwise. -func (v *Element) Equal(u *Element) int { - sa, sv := u.Bytes(), v.Bytes() - return subtle.ConstantTimeCompare(sa, sv) -} - -// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise. -func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) } - -// Select sets v to a if cond == 1, and to b if cond == 0. 
-func (v *Element) Select(a, b *Element, cond int) *Element { - m := mask64Bits(cond) - v.l0 = (m & a.l0) | (^m & b.l0) - v.l1 = (m & a.l1) | (^m & b.l1) - v.l2 = (m & a.l2) | (^m & b.l2) - v.l3 = (m & a.l3) | (^m & b.l3) - v.l4 = (m & a.l4) | (^m & b.l4) - return v -} - -// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v. -func (v *Element) Swap(u *Element, cond int) { - m := mask64Bits(cond) - t := m & (v.l0 ^ u.l0) - v.l0 ^= t - u.l0 ^= t - t = m & (v.l1 ^ u.l1) - v.l1 ^= t - u.l1 ^= t - t = m & (v.l2 ^ u.l2) - v.l2 ^= t - u.l2 ^= t - t = m & (v.l3 ^ u.l3) - v.l3 ^= t - u.l3 ^= t - t = m & (v.l4 ^ u.l4) - v.l4 ^= t - u.l4 ^= t -} - -// IsNegative returns 1 if v is negative, and 0 otherwise. -func (v *Element) IsNegative() int { - return int(v.Bytes()[0] & 1) -} - -// Absolute sets v to |u|, and returns v. -func (v *Element) Absolute(u *Element) *Element { - return v.Select(new(Element).Negate(u), u, u.IsNegative()) -} - -// Multiply sets v = x * y, and returns v. -func (v *Element) Multiply(x, y *Element) *Element { - feMul(v, x, y) - return v -} - -// Square sets v = x * x, and returns v. -func (v *Element) Square(x *Element) *Element { - feSquare(v, x) - return v -} - -// Mult32 sets v = x * y, and returns v. -func (v *Element) Mult32(x *Element, y uint32) *Element { - x0lo, x0hi := mul51(x.l0, y) - x1lo, x1hi := mul51(x.l1, y) - x2lo, x2hi := mul51(x.l2, y) - x3lo, x3hi := mul51(x.l3, y) - x4lo, x4hi := mul51(x.l4, y) - v.l0 = x0lo + 19*x4hi // carried over per the reduction identity - v.l1 = x1lo + x0hi - v.l2 = x2lo + x1hi - v.l3 = x3lo + x2hi - v.l4 = x4lo + x3hi - // The hi portions are going to be only 32 bits, plus any previous excess, - // so we can skip the carry propagation. - return v -} - -// mul51 returns lo + hi * 2⁵¹ = a * b. 
-func mul51(a uint64, b uint32) (lo uint64, hi uint64) { - mh, ml := bits.Mul64(a, uint64(b)) - lo = ml & maskLow51Bits - hi = (mh << 13) | (ml >> 51) - return -} - -// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3. -func (v *Element) Pow22523(x *Element) *Element { - var t0, t1, t2 Element - - t0.Square(x) // x^2 - t1.Square(&t0) // x^4 - t1.Square(&t1) // x^8 - t1.Multiply(x, &t1) // x^9 - t0.Multiply(&t0, &t1) // x^11 - t0.Square(&t0) // x^22 - t0.Multiply(&t1, &t0) // x^31 - t1.Square(&t0) // x^62 - for i := 1; i < 5; i++ { // x^992 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1 - t1.Square(&t0) // 2^11 - 2 - for i := 1; i < 10; i++ { // 2^20 - 2^10 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^20 - 1 - t2.Square(&t1) // 2^21 - 2 - for i := 1; i < 20; i++ { // 2^40 - 2^20 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^40 - 1 - t1.Square(&t1) // 2^41 - 2 - for i := 1; i < 10; i++ { // 2^50 - 2^10 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^50 - 1 - t1.Square(&t0) // 2^51 - 2 - for i := 1; i < 50; i++ { // 2^100 - 2^50 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^100 - 1 - t2.Square(&t1) // 2^101 - 2 - for i := 1; i < 100; i++ { // 2^200 - 2^100 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^200 - 1 - t1.Square(&t1) // 2^201 - 2 - for i := 1; i < 50; i++ { // 2^250 - 2^50 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^250 - 1 - t0.Square(&t0) // 2^251 - 2 - t0.Square(&t0) // 2^252 - 4 - return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3) -} - -// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion. -var sqrtM1 = &Element{1718705420411056, 234908883556509, - 2233514472574048, 2117202627021982, 765476049583133} - -// SqrtRatio sets r to the non-negative square root of the ratio of u and v. -// -// If u/v is square, SqrtRatio returns r and 1. 
If u/v is not square, SqrtRatio -// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00, -// and returns r and 0. -func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) { - var a, b Element - - // r = (u * v3) * (u * v7)^((p-5)/8) - v2 := a.Square(v) - uv3 := b.Multiply(u, b.Multiply(v2, v)) - uv7 := a.Multiply(uv3, a.Square(v2)) - r.Multiply(uv3, r.Pow22523(uv7)) - - check := a.Multiply(v, a.Square(r)) // check = v * r^2 - - uNeg := b.Negate(u) - correctSignSqrt := check.Equal(u) - flippedSignSqrt := check.Equal(uNeg) - flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1)) - - rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r - // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) - r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI) - - r.Absolute(r) // Choose the nonnegative square root. - return r, correctSignSqrt | flippedSignSqrt -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go deleted file mode 100644 index edcf163c..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -package field - -// feMul sets out = a * b. It works like feMulGeneric. -// -//go:noescape -func feMul(out *Element, a *Element, b *Element) - -// feSquare sets out = a * a. It works like feSquareGeneric. 
-// -//go:noescape -func feSquare(out *Element, a *Element) diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s deleted file mode 100644 index 293f013c..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s +++ /dev/null @@ -1,379 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -#include "textflag.h" - -// func feMul(out *Element, a *Element, b *Element) -TEXT ·feMul(SB), NOSPLIT, $0-24 - MOVQ a+8(FP), CX - MOVQ b+16(FP), BX - - // r0 = a0×b0 - MOVQ (CX), AX - MULQ (BX) - MOVQ AX, DI - MOVQ DX, SI - - // r0 += 19×a1×b4 - MOVQ 8(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a2×b3 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a3×b2 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a4×b1 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 8(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r1 = a0×b1 - MOVQ (CX), AX - MULQ 8(BX) - MOVQ AX, R9 - MOVQ DX, R8 - - // r1 += a1×b0 - MOVQ 8(CX), AX - MULQ (BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a2×b4 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a3×b3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a4×b2 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r2 = a0×b2 - MOVQ (CX), AX - MULQ 16(BX) - MOVQ AX, R11 - MOVQ DX, R10 - - // r2 += a1×b1 - MOVQ 8(CX), AX - MULQ 8(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += a2×b0 - MOVQ 16(CX), AX - MULQ (BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += 19×a3×b4 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // 
r2 += 19×a4×b3 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r3 = a0×b3 - MOVQ (CX), AX - MULQ 24(BX) - MOVQ AX, R13 - MOVQ DX, R12 - - // r3 += a1×b2 - MOVQ 8(CX), AX - MULQ 16(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a2×b1 - MOVQ 16(CX), AX - MULQ 8(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a3×b0 - MOVQ 24(CX), AX - MULQ (BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += 19×a4×b4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r4 = a0×b4 - MOVQ (CX), AX - MULQ 32(BX) - MOVQ AX, R15 - MOVQ DX, R14 - - // r4 += a1×b3 - MOVQ 8(CX), AX - MULQ 24(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a2×b2 - MOVQ 16(CX), AX - MULQ 16(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a3×b1 - MOVQ 24(CX), AX - MULQ 8(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a4×b0 - MOVQ 32(CX), AX - MULQ (BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, DI, SI - SHLQ $0x0d, R9, R8 - SHLQ $0x0d, R11, R10 - SHLQ $0x0d, R13, R12 - SHLQ $0x0d, R15, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ SI, R9 - ANDQ AX, R11 - ADDQ R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Second reduction chain (carryPropagate) - MOVQ DI, SI - SHRQ $0x33, SI - MOVQ R9, R8 - SHRQ $0x33, R8 - MOVQ R11, R10 - SHRQ $0x33, R10 - MOVQ R13, R12 - SHRQ $0x33, R12 - MOVQ R15, R14 - SHRQ $0x33, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ SI, R9 - ANDQ AX, R11 - ADDQ R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Store output - MOVQ out+0(FP), AX - MOVQ DI, (AX) - MOVQ R9, 8(AX) - MOVQ R11, 16(AX) - MOVQ R13, 24(AX) - MOVQ R15, 32(AX) - RET - -// func feSquare(out *Element, a *Element) -TEXT ·feSquare(SB), NOSPLIT, $0-16 - MOVQ a+8(FP), CX - - // r0 = l0×l0 - MOVQ (CX), AX - MULQ (CX) - MOVQ AX, SI - MOVQ DX, BX - - // r0 += 38×l1×l4 - MOVQ 8(CX), 
AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r0 += 38×l2×l3 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 24(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r1 = 2×l0×l1 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 8(CX) - MOVQ AX, R8 - MOVQ DX, DI - - // r1 += 38×l2×l4 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r1 += 19×l3×l3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r2 = 2×l0×l2 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 16(CX) - MOVQ AX, R10 - MOVQ DX, R9 - - // r2 += l1×l1 - MOVQ 8(CX), AX - MULQ 8(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r2 += 38×l3×l4 - MOVQ 24(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r3 = 2×l0×l3 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 24(CX) - MOVQ AX, R12 - MOVQ DX, R11 - - // r3 += 2×l1×l2 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 16(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r3 += 19×l4×l4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r4 = 2×l0×l4 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 32(CX) - MOVQ AX, R14 - MOVQ DX, R13 - - // r4 += 2×l1×l3 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 24(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // r4 += l2×l2 - MOVQ 16(CX), AX - MULQ 16(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, SI, BX - SHLQ $0x0d, R8, DI - SHLQ $0x0d, R10, R9 - SHLQ $0x0d, R12, R11 - SHLQ $0x0d, R14, R13 - ANDQ AX, SI - IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Second reduction chain (carryPropagate) - MOVQ SI, BX - SHRQ $0x33, BX - MOVQ R8, DI - SHRQ $0x33, DI - MOVQ R10, R9 - SHRQ $0x33, R9 - MOVQ R12, R11 - SHRQ $0x33, R11 - MOVQ R14, R13 - SHRQ $0x33, R13 - ANDQ AX, SI - IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - 
ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Store output - MOVQ out+0(FP), AX - MOVQ SI, (AX) - MOVQ R8, 8(AX) - MOVQ R10, 16(AX) - MOVQ R12, 24(AX) - MOVQ R14, 32(AX) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go deleted file mode 100644 index ddb6c9b8..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || !gc || purego -// +build !amd64 !gc purego - -package field - -func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } - -func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go deleted file mode 100644 index af459ef5..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego -// +build arm64,gc,!purego - -package field - -//go:noescape -func carryPropagate(v *Element) - -func (v *Element) carryPropagate() *Element { - carryPropagate(v) - return v -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s deleted file mode 100644 index 5c91e458..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego -// +build arm64,gc,!purego - -#include "textflag.h" - -// carryPropagate works exactly like carryPropagateGeneric and uses the -// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but -// avoids loading R0-R4 twice and uses LDP and STP. -// -// See https://golang.org/issues/43145 for the main compiler issue. -// -// func carryPropagate(v *Element) -TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 - MOVD v+0(FP), R20 - - LDP 0(R20), (R0, R1) - LDP 16(R20), (R2, R3) - MOVD 32(R20), R4 - - AND $0x7ffffffffffff, R0, R10 - AND $0x7ffffffffffff, R1, R11 - AND $0x7ffffffffffff, R2, R12 - AND $0x7ffffffffffff, R3, R13 - AND $0x7ffffffffffff, R4, R14 - - ADD R0>>51, R11, R11 - ADD R1>>51, R12, R12 - ADD R2>>51, R13, R13 - ADD R3>>51, R14, R14 - // R4>>51 * 19 + R10 -> R10 - LSR $51, R4, R21 - MOVD $19, R22 - MADD R22, R10, R21, R10 - - STP (R10, R11), 0(R20) - STP (R12, R13), 16(R20) - MOVD R14, 32(R20) - - RET diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go deleted file mode 100644 index 234a5b2e..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !arm64 || !gc || purego -// +build !arm64 !gc purego - -package field - -func (v *Element) carryPropagate() *Element { - return v.carryPropagateGeneric() -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go deleted file mode 100644 index 2671217d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package field - -import "math/bits" - -// uint128 holds a 128-bit number as two 64-bit limbs, for use with the -// bits.Mul64 and bits.Add64 intrinsics. -type uint128 struct { - lo, hi uint64 -} - -// mul64 returns a * b. -func mul64(a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - return uint128{lo, hi} -} - -// addMul64 returns v + a * b. -func addMul64(v uint128, a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - lo, c := bits.Add64(lo, v.lo, 0) - hi, _ = bits.Add64(hi, v.hi, c) - return uint128{lo, hi} -} - -// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits. -func shiftRightBy51(a uint128) uint64 { - return (a.hi << (64 - 51)) | (a.lo >> 51) -} - -func feMulGeneric(v, a, b *Element) { - a0 := a.l0 - a1 := a.l1 - a2 := a.l2 - a3 := a.l3 - a4 := a.l4 - - b0 := b.l0 - b1 := b.l1 - b2 := b.l2 - b3 := b.l3 - b4 := b.l4 - - // Limb multiplication works like pen-and-paper columnar multiplication, but - // with 51-bit limbs instead of digits. 
- // - // a4 a3 a2 a1 a0 x - // b4 b3 b2 b1 b0 = - // ------------------------ - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a4b1 a3b1 a2b1 a1b1 a0b1 + - // a4b2 a3b2 a2b2 a1b2 a0b2 + - // a4b3 a3b3 a2b3 a1b3 a0b3 + - // a4b4 a3b4 a2b4 a1b4 a0b4 = - // ---------------------------------------------- - // r8 r7 r6 r5 r4 r3 r2 r1 r0 - // - // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to - // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5, - // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc. - // - // Reduction can be carried out simultaneously to multiplication. For - // example, we do not compute r5: whenever the result of a multiplication - // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0. - // - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a3b1 a2b1 a1b1 a0b1 19×a4b1 + - // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 + - // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 + - // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 = - // -------------------------------------- - // r4 r3 r2 r1 r0 - // - // Finally we add up the columns into wide, overlapping limbs. 
- - a1_19 := a1 * 19 - a2_19 := a2 * 19 - a3_19 := a3 * 19 - a4_19 := a4 * 19 - - // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) - r0 := mul64(a0, b0) - r0 = addMul64(r0, a1_19, b4) - r0 = addMul64(r0, a2_19, b3) - r0 = addMul64(r0, a3_19, b2) - r0 = addMul64(r0, a4_19, b1) - - // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2) - r1 := mul64(a0, b1) - r1 = addMul64(r1, a1, b0) - r1 = addMul64(r1, a2_19, b4) - r1 = addMul64(r1, a3_19, b3) - r1 = addMul64(r1, a4_19, b2) - - // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3) - r2 := mul64(a0, b2) - r2 = addMul64(r2, a1, b1) - r2 = addMul64(r2, a2, b0) - r2 = addMul64(r2, a3_19, b4) - r2 = addMul64(r2, a4_19, b3) - - // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4 - r3 := mul64(a0, b3) - r3 = addMul64(r3, a1, b2) - r3 = addMul64(r3, a2, b1) - r3 = addMul64(r3, a3, b0) - r3 = addMul64(r3, a4_19, b4) - - // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 - r4 := mul64(a0, b4) - r4 = addMul64(r4, a1, b3) - r4 = addMul64(r4, a2, b2) - r4 = addMul64(r4, a3, b1) - r4 = addMul64(r4, a4, b0) - - // After the multiplication, we need to reduce (carry) the five coefficients - // to obtain a result with limbs that are at most slightly larger than 2⁵¹, - // to respect the Element invariant. - // - // Overall, the reduction works the same as carryPropagate, except with - // wider inputs: we take the carry for each coefficient by shifting it right - // by 51, and add it to the limb above it. The top carry is multiplied by 19 - // according to the reduction identity and added to the lowest limb. - // - // The largest coefficient (r0) will be at most 111 bits, which guarantees - // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64. 
- // - // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) - // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²) - // r0 < (1 + 19 × 4) × 2⁵² × 2⁵² - // r0 < 2⁷ × 2⁵² × 2⁵² - // r0 < 2¹¹¹ - // - // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most - // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and - // allows us to easily apply the reduction identity. - // - // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 - // r4 < 5 × 2⁵² × 2⁵² - // r4 < 2¹⁰⁷ - // - - c0 := shiftRightBy51(r0) - c1 := shiftRightBy51(r1) - c2 := shiftRightBy51(r2) - c3 := shiftRightBy51(r3) - c4 := shiftRightBy51(r4) - - rr0 := r0.lo&maskLow51Bits + c4*19 - rr1 := r1.lo&maskLow51Bits + c0 - rr2 := r2.lo&maskLow51Bits + c1 - rr3 := r3.lo&maskLow51Bits + c2 - rr4 := r4.lo&maskLow51Bits + c3 - - // Now all coefficients fit into 64-bit registers but are still too large to - // be passed around as a Element. We therefore do one last carry chain, - // where the carries will be small enough to fit in the wiggle room above 2⁵¹. - *v = Element{rr0, rr1, rr2, rr3, rr4} - v.carryPropagate() -} - -func feSquareGeneric(v, a *Element) { - l0 := a.l0 - l1 := a.l1 - l2 := a.l2 - l3 := a.l3 - l4 := a.l4 - - // Squaring works precisely like multiplication above, but thanks to its - // symmetry we get to group a few terms together. 
- // - // l4 l3 l2 l1 l0 x - // l4 l3 l2 l1 l0 = - // ------------------------ - // l4l0 l3l0 l2l0 l1l0 l0l0 + - // l4l1 l3l1 l2l1 l1l1 l0l1 + - // l4l2 l3l2 l2l2 l1l2 l0l2 + - // l4l3 l3l3 l2l3 l1l3 l0l3 + - // l4l4 l3l4 l2l4 l1l4 l0l4 = - // ---------------------------------------------- - // r8 r7 r6 r5 r4 r3 r2 r1 r0 - // - // l4l0 l3l0 l2l0 l1l0 l0l0 + - // l3l1 l2l1 l1l1 l0l1 19×l4l1 + - // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 + - // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 + - // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 = - // -------------------------------------- - // r4 r3 r2 r1 r0 - // - // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with - // only three Mul64 and four Add64, instead of five and eight. - - l0_2 := l0 * 2 - l1_2 := l1 * 2 - - l1_38 := l1 * 38 - l2_38 := l2 * 38 - l3_38 := l3 * 38 - - l3_19 := l3 * 19 - l4_19 := l4 * 19 - - // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3) - r0 := mul64(l0, l0) - r0 = addMul64(r0, l1_38, l4) - r0 = addMul64(r0, l2_38, l3) - - // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3 - r1 := mul64(l0_2, l1) - r1 = addMul64(r1, l2_38, l4) - r1 = addMul64(r1, l3_19, l3) - - // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4 - r2 := mul64(l0_2, l2) - r2 = addMul64(r2, l1, l1) - r2 = addMul64(r2, l3_38, l4) - - // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4 - r3 := mul64(l0_2, l3) - r3 = addMul64(r3, l1_2, l2) - r3 = addMul64(r3, l4_19, l4) - - // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2 - r4 := mul64(l0_2, l4) - r4 = addMul64(r4, l1_2, l3) - r4 = addMul64(r4, l2, l2) - - c0 := shiftRightBy51(r0) - c1 := shiftRightBy51(r1) - c2 := shiftRightBy51(r2) - c3 := shiftRightBy51(r3) - c4 := shiftRightBy51(r4) - - rr0 := r0.lo&maskLow51Bits + c4*19 - rr1 := r1.lo&maskLow51Bits + c0 - rr2 := r2.lo&maskLow51Bits + c1 - rr3 := r3.lo&maskLow51Bits + c2 - rr4 := 
r4.lo&maskLow51Bits + c3 - - *v = Element{rr0, rr1, rr2, rr3, rr4} - v.carryPropagate() -} - -// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction -// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline -func (v *Element) carryPropagateGeneric() *Element { - c0 := v.l0 >> 51 - c1 := v.l1 >> 51 - c2 := v.l2 >> 51 - c3 := v.l3 >> 51 - c4 := v.l4 >> 51 - - v.l0 = v.l0&maskLow51Bits + c4*19 - v.l1 = v.l1&maskLow51Bits + c0 - v.l2 = v.l2&maskLow51Bits + c1 - v.l3 = v.l3&maskLow51Bits + c2 - v.l4 = v.l4&maskLow51Bits + c3 - - return v -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint deleted file mode 100644 index e3685f95..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint +++ /dev/null @@ -1 +0,0 @@ -b0c49ae9f59d233526f8934262c5bbbe14d4358d diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh deleted file mode 100644 index 1ba22a8b..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh +++ /dev/null @@ -1,19 +0,0 @@ -#! /bin/bash -set -euo pipefail - -cd "$(git rev-parse --show-toplevel)" - -STD_PATH=src/crypto/ed25519/internal/edwards25519/field -LOCAL_PATH=curve25519/internal/field -LAST_SYNC_REF=$(cat $LOCAL_PATH/sync.checkpoint) - -git fetch https://go.googlesource.com/go master - -if git diff --quiet $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH; then - echo "No changes." -else - NEW_REF=$(git rev-parse FETCH_HEAD | tee $LOCAL_PATH/sync.checkpoint) - echo "Applying changes from $LAST_SYNC_REF to $NEW_REF..." 
- git diff $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH | \ - git apply -3 --directory=$LOCAL_PATH -fi diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go deleted file mode 100644 index a7828345..00000000 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. -// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. -// -// Beginning with Go 1.13, the functionality of this package was moved to the -// standard library as crypto/ed25519. This package only acts as a compatibility -// wrapper. -package ed25519 - -import ( - "crypto/ed25519" - "io" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -// -// This type is an alias for crypto/ed25519's PublicKey type. -// See the crypto/ed25519 package for the methods on this type. -type PublicKey = ed25519.PublicKey - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. 
-// -// This type is an alias for crypto/ed25519's PrivateKey type. -// See the crypto/ed25519 package for the methods on this type. -type PrivateKey = ed25519.PrivateKey - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - return ed25519.GenerateKey(rand) -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - return ed25519.NewKeyFromSeed(seed) -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - return ed25519.Sign(privateKey, message) -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - return ed25519.Verify(publicKey, message, sig) -} diff --git a/vendor/golang.org/x/crypto/nacl/box/box.go b/vendor/golang.org/x/crypto/nacl/box/box.go deleted file mode 100644 index 7f3b830e..00000000 --- a/vendor/golang.org/x/crypto/nacl/box/box.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package box authenticates and encrypts small messages using public-key cryptography. - -Box uses Curve25519, XSalsa20 and Poly1305 to encrypt and authenticate -messages. The length of messages is not hidden. - -It is the caller's responsibility to ensure the uniqueness of nonces—for -example, by using nonce 1 for the first message, nonce 2 for the second -message, etc. 
Nonces are long enough that randomly generated nonces have -negligible risk of collision. - -Messages should be small because: - -1. The whole message needs to be held in memory to be processed. - -2. Using large messages pressures implementations on small machines to decrypt -and process plaintext before authenticating it. This is very dangerous, and -this API does not allow it, but a protocol that uses excessive message sizes -might present some implementations with no other choice. - -3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. - -4. Performance may be improved by working with messages that fit into data caches. - -Thus large amounts of data should be chunked so that each message is small. -(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable -chunk size. - -This package is interoperable with NaCl: https://nacl.cr.yp.to/box.html. -Anonymous sealing/opening is an extension of NaCl defined by and interoperable -with libsodium: -https://libsodium.gitbook.io/doc/public-key_cryptography/sealed_boxes. -*/ -package box // import "golang.org/x/crypto/nacl/box" - -import ( - cryptorand "crypto/rand" - "io" - - "golang.org/x/crypto/blake2b" - "golang.org/x/crypto/curve25519" - "golang.org/x/crypto/nacl/secretbox" - "golang.org/x/crypto/salsa20/salsa" -) - -const ( - // Overhead is the number of bytes of overhead when boxing a message. - Overhead = secretbox.Overhead - - // AnonymousOverhead is the number of bytes of overhead when using anonymous - // sealed boxes. - AnonymousOverhead = Overhead + 32 -) - -// GenerateKey generates a new public/private key pair suitable for use with -// Seal and Open. 
-func GenerateKey(rand io.Reader) (publicKey, privateKey *[32]byte, err error) { - publicKey = new([32]byte) - privateKey = new([32]byte) - _, err = io.ReadFull(rand, privateKey[:]) - if err != nil { - publicKey = nil - privateKey = nil - return - } - - curve25519.ScalarBaseMult(publicKey, privateKey) - return -} - -var zeros [16]byte - -// Precompute calculates the shared key between peersPublicKey and privateKey -// and writes it to sharedKey. The shared key can be used with -// OpenAfterPrecomputation and SealAfterPrecomputation to speed up processing -// when using the same pair of keys repeatedly. -func Precompute(sharedKey, peersPublicKey, privateKey *[32]byte) { - curve25519.ScalarMult(sharedKey, privateKey, peersPublicKey) - salsa.HSalsa20(sharedKey, &zeros, sharedKey, &salsa.Sigma) -} - -// Seal appends an encrypted and authenticated copy of message to out, which -// will be Overhead bytes longer than the original and must not overlap it. The -// nonce must be unique for each distinct message for a given pair of keys. -func Seal(out, message []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) []byte { - var sharedKey [32]byte - Precompute(&sharedKey, peersPublicKey, privateKey) - return secretbox.Seal(out, message, nonce, &sharedKey) -} - -// SealAfterPrecomputation performs the same actions as Seal, but takes a -// shared key as generated by Precompute. -func SealAfterPrecomputation(out, message []byte, nonce *[24]byte, sharedKey *[32]byte) []byte { - return secretbox.Seal(out, message, nonce, sharedKey) -} - -// Open authenticates and decrypts a box produced by Seal and appends the -// message to out, which must not overlap box. The output will be Overhead -// bytes smaller than box. 
-func Open(out, box []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) ([]byte, bool) { - var sharedKey [32]byte - Precompute(&sharedKey, peersPublicKey, privateKey) - return secretbox.Open(out, box, nonce, &sharedKey) -} - -// OpenAfterPrecomputation performs the same actions as Open, but takes a -// shared key as generated by Precompute. -func OpenAfterPrecomputation(out, box []byte, nonce *[24]byte, sharedKey *[32]byte) ([]byte, bool) { - return secretbox.Open(out, box, nonce, sharedKey) -} - -// SealAnonymous appends an encrypted and authenticated copy of message to out, -// which will be AnonymousOverhead bytes longer than the original and must not -// overlap it. This differs from Seal in that the sender is not required to -// provide a private key. -func SealAnonymous(out, message []byte, recipient *[32]byte, rand io.Reader) ([]byte, error) { - if rand == nil { - rand = cryptorand.Reader - } - ephemeralPub, ephemeralPriv, err := GenerateKey(rand) - if err != nil { - return nil, err - } - - var nonce [24]byte - if err := sealNonce(ephemeralPub, recipient, &nonce); err != nil { - return nil, err - } - - if total := len(out) + AnonymousOverhead + len(message); cap(out) < total { - original := out - out = make([]byte, 0, total) - out = append(out, original...) - } - out = append(out, ephemeralPub[:]...) - - return Seal(out, message, &nonce, recipient, ephemeralPriv), nil -} - -// OpenAnonymous authenticates and decrypts a box produced by SealAnonymous and -// appends the message to out, which must not overlap box. The output will be -// AnonymousOverhead bytes smaller than box. 
-func OpenAnonymous(out, box []byte, publicKey, privateKey *[32]byte) (message []byte, ok bool) { - if len(box) < AnonymousOverhead { - return nil, false - } - - var ephemeralPub [32]byte - copy(ephemeralPub[:], box[:32]) - - var nonce [24]byte - if err := sealNonce(&ephemeralPub, publicKey, &nonce); err != nil { - return nil, false - } - - return Open(out, box[32:], &nonce, &ephemeralPub, privateKey) -} - -// sealNonce generates a 24 byte nonce that is a blake2b digest of the -// ephemeral public key and the receiver's public key. -func sealNonce(ephemeralPub, peersPublicKey *[32]byte, nonce *[24]byte) error { - h, err := blake2b.New(24, nil) - if err != nil { - return err - } - - if _, err = h.Write(ephemeralPub[:]); err != nil { - return err - } - - if _, err = h.Write(peersPublicKey[:]); err != nil { - return err - } - - h.Sum(nonce[:0]) - - return nil -} diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go deleted file mode 100644 index f3c3242a..00000000 --- a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package secretbox encrypts and authenticates small messages. - -Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with -secret-key cryptography. The length of messages is not hidden. - -It is the caller's responsibility to ensure the uniqueness of nonces—for -example, by using nonce 1 for the first message, nonce 2 for the second -message, etc. Nonces are long enough that randomly generated nonces have -negligible risk of collision. - -Messages should be small because: - -1. The whole message needs to be held in memory to be processed. - -2. 
Using large messages pressures implementations on small machines to decrypt -and process plaintext before authenticating it. This is very dangerous, and -this API does not allow it, but a protocol that uses excessive message sizes -might present some implementations with no other choice. - -3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. - -4. Performance may be improved by working with messages that fit into data caches. - -Thus large amounts of data should be chunked so that each message is small. -(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable -chunk size. - -This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html. -*/ -package secretbox // import "golang.org/x/crypto/nacl/secretbox" - -import ( - "golang.org/x/crypto/internal/alias" - "golang.org/x/crypto/internal/poly1305" - "golang.org/x/crypto/salsa20/salsa" -) - -// Overhead is the number of bytes of overhead when boxing a message. -const Overhead = poly1305.TagSize - -// setup produces a sub-key and Salsa20 counter given a nonce and key. -func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) { - // We use XSalsa20 for encryption so first we need to generate a - // key and nonce with HSalsa20. - var hNonce [16]byte - copy(hNonce[:], nonce[:]) - salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma) - - // The final 8 bytes of the original nonce form the new nonce. - copy(counter[:], nonce[16:]) -} - -// sliceForAppend takes a slice and a requested number of bytes. It returns a -// slice with the contents of the given slice followed by that many bytes and a -// second slice that aliases into it and contains only the extra bytes. If the -// original slice has sufficient capacity then no allocation is performed. 
-func sliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return -} - -// Seal appends an encrypted and authenticated copy of message to out, which -// must not overlap message. The key and nonce pair must be unique for each -// distinct message and the output will be Overhead bytes longer than message. -func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte { - var subKey [32]byte - var counter [16]byte - setup(&subKey, &counter, nonce, key) - - // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since - // Salsa20 works with 64-byte blocks, we also generate 32 bytes of - // keystream as a side effect. - var firstBlock [64]byte - salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) - - var poly1305Key [32]byte - copy(poly1305Key[:], firstBlock[:]) - - ret, out := sliceForAppend(out, len(message)+poly1305.TagSize) - if alias.AnyOverlap(out, message) { - panic("nacl: invalid buffer overlap") - } - - // We XOR up to 32 bytes of message with the keystream generated from - // the first block. - firstMessageBlock := message - if len(firstMessageBlock) > 32 { - firstMessageBlock = firstMessageBlock[:32] - } - - tagOut := out - out = out[poly1305.TagSize:] - for i, x := range firstMessageBlock { - out[i] = firstBlock[32+i] ^ x - } - message = message[len(firstMessageBlock):] - ciphertext := out - out = out[len(firstMessageBlock):] - - // Now encrypt the rest. - counter[8] = 1 - salsa.XORKeyStream(out, message, &counter, &subKey) - - var tag [poly1305.TagSize]byte - poly1305.Sum(&tag, ciphertext, &poly1305Key) - copy(tagOut, tag[:]) - - return ret -} - -// Open authenticates and decrypts a box produced by Seal and appends the -// message to out, which must not overlap box. The output will be Overhead -// bytes smaller than box. 
-func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) { - if len(box) < Overhead { - return nil, false - } - - var subKey [32]byte - var counter [16]byte - setup(&subKey, &counter, nonce, key) - - // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since - // Salsa20 works with 64-byte blocks, we also generate 32 bytes of - // keystream as a side effect. - var firstBlock [64]byte - salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) - - var poly1305Key [32]byte - copy(poly1305Key[:], firstBlock[:]) - var tag [poly1305.TagSize]byte - copy(tag[:], box) - - if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) { - return nil, false - } - - ret, out := sliceForAppend(out, len(box)-Overhead) - if alias.AnyOverlap(out, box) { - panic("nacl: invalid buffer overlap") - } - - // We XOR up to 32 bytes of box with the keystream generated from - // the first block. - box = box[Overhead:] - firstMessageBlock := box - if len(firstMessageBlock) > 32 { - firstMessageBlock = firstMessageBlock[:32] - } - for i, x := range firstMessageBlock { - out[i] = firstBlock[32+i] ^ x - } - - box = box[len(firstMessageBlock):] - out = out[len(firstMessageBlock):] - - // Now decrypt the rest. - counter[8] = 1 - salsa.XORKeyStream(out, box, &counter, &subKey) - - return ret, true -} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go deleted file mode 100644 index 3fd05b27..00000000 --- a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package salsa provides low-level access to functions in the Salsa family. -package salsa // import "golang.org/x/crypto/salsa20/salsa" - -import "math/bits" - -// Sigma is the Salsa20 constant for 256-bit keys. 
-var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} - -// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte -// key k, and 16-byte constant c, and puts the result into the 32-byte array -// out. -func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { - x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 - x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 - x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 - x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 - x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 - x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 - x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 - x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 - x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 - x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 - x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 - x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 - x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 - x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 - x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 - - for i := 0; i < 20; i += 2 { - u := x0 + x12 - x4 ^= bits.RotateLeft32(u, 7) - u = x4 + x0 - x8 ^= bits.RotateLeft32(u, 9) - u = x8 + x4 - x12 ^= bits.RotateLeft32(u, 13) - u = x12 + x8 - x0 ^= bits.RotateLeft32(u, 18) - - u = x5 + x1 - x9 ^= bits.RotateLeft32(u, 7) - u = x9 + x5 - x13 ^= bits.RotateLeft32(u, 9) - u = x13 + x9 - x1 ^= bits.RotateLeft32(u, 13) - u 
= x1 + x13 - x5 ^= bits.RotateLeft32(u, 18) - - u = x10 + x6 - x14 ^= bits.RotateLeft32(u, 7) - u = x14 + x10 - x2 ^= bits.RotateLeft32(u, 9) - u = x2 + x14 - x6 ^= bits.RotateLeft32(u, 13) - u = x6 + x2 - x10 ^= bits.RotateLeft32(u, 18) - - u = x15 + x11 - x3 ^= bits.RotateLeft32(u, 7) - u = x3 + x15 - x7 ^= bits.RotateLeft32(u, 9) - u = x7 + x3 - x11 ^= bits.RotateLeft32(u, 13) - u = x11 + x7 - x15 ^= bits.RotateLeft32(u, 18) - - u = x0 + x3 - x1 ^= bits.RotateLeft32(u, 7) - u = x1 + x0 - x2 ^= bits.RotateLeft32(u, 9) - u = x2 + x1 - x3 ^= bits.RotateLeft32(u, 13) - u = x3 + x2 - x0 ^= bits.RotateLeft32(u, 18) - - u = x5 + x4 - x6 ^= bits.RotateLeft32(u, 7) - u = x6 + x5 - x7 ^= bits.RotateLeft32(u, 9) - u = x7 + x6 - x4 ^= bits.RotateLeft32(u, 13) - u = x4 + x7 - x5 ^= bits.RotateLeft32(u, 18) - - u = x10 + x9 - x11 ^= bits.RotateLeft32(u, 7) - u = x11 + x10 - x8 ^= bits.RotateLeft32(u, 9) - u = x8 + x11 - x9 ^= bits.RotateLeft32(u, 13) - u = x9 + x8 - x10 ^= bits.RotateLeft32(u, 18) - - u = x15 + x14 - x12 ^= bits.RotateLeft32(u, 7) - u = x12 + x15 - x13 ^= bits.RotateLeft32(u, 9) - u = x13 + x12 - x14 ^= bits.RotateLeft32(u, 13) - u = x14 + x13 - x15 ^= bits.RotateLeft32(u, 18) - } - out[0] = byte(x0) - out[1] = byte(x0 >> 8) - out[2] = byte(x0 >> 16) - out[3] = byte(x0 >> 24) - - out[4] = byte(x5) - out[5] = byte(x5 >> 8) - out[6] = byte(x5 >> 16) - out[7] = byte(x5 >> 24) - - out[8] = byte(x10) - out[9] = byte(x10 >> 8) - out[10] = byte(x10 >> 16) - out[11] = byte(x10 >> 24) - - out[12] = byte(x15) - out[13] = byte(x15 >> 8) - out[14] = byte(x15 >> 16) - out[15] = byte(x15 >> 24) - - out[16] = byte(x6) - out[17] = byte(x6 >> 8) - out[18] = byte(x6 >> 16) - out[19] = byte(x6 >> 24) - - out[20] = byte(x7) - out[21] = byte(x7 >> 8) - out[22] = byte(x7 >> 16) - out[23] = byte(x7 >> 24) - - out[24] = byte(x8) - out[25] = byte(x8 >> 8) - out[26] = byte(x8 >> 16) - out[27] = byte(x8 >> 24) - - out[28] = byte(x9) - out[29] = byte(x9 >> 8) - out[30] = byte(x9 >> 16) 
- out[31] = byte(x9 >> 24) -} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go deleted file mode 100644 index 7ec7bb39..00000000 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package salsa - -import "math/bits" - -// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts -// the result into the 64-byte array out. The input and output may be the same array. -func Core208(out *[64]byte, in *[64]byte) { - j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 - j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 - j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 - j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24 - j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24 - j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24 - j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24 - j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24 - j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24 - j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24 - j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24 - j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24 - j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24 - j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24 - j15 := 
uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24 - - x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 - x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 - - for i := 0; i < 8; i += 2 { - u := x0 + x12 - x4 ^= bits.RotateLeft32(u, 7) - u = x4 + x0 - x8 ^= bits.RotateLeft32(u, 9) - u = x8 + x4 - x12 ^= bits.RotateLeft32(u, 13) - u = x12 + x8 - x0 ^= bits.RotateLeft32(u, 18) - - u = x5 + x1 - x9 ^= bits.RotateLeft32(u, 7) - u = x9 + x5 - x13 ^= bits.RotateLeft32(u, 9) - u = x13 + x9 - x1 ^= bits.RotateLeft32(u, 13) - u = x1 + x13 - x5 ^= bits.RotateLeft32(u, 18) - - u = x10 + x6 - x14 ^= bits.RotateLeft32(u, 7) - u = x14 + x10 - x2 ^= bits.RotateLeft32(u, 9) - u = x2 + x14 - x6 ^= bits.RotateLeft32(u, 13) - u = x6 + x2 - x10 ^= bits.RotateLeft32(u, 18) - - u = x15 + x11 - x3 ^= bits.RotateLeft32(u, 7) - u = x3 + x15 - x7 ^= bits.RotateLeft32(u, 9) - u = x7 + x3 - x11 ^= bits.RotateLeft32(u, 13) - u = x11 + x7 - x15 ^= bits.RotateLeft32(u, 18) - - u = x0 + x3 - x1 ^= bits.RotateLeft32(u, 7) - u = x1 + x0 - x2 ^= bits.RotateLeft32(u, 9) - u = x2 + x1 - x3 ^= bits.RotateLeft32(u, 13) - u = x3 + x2 - x0 ^= bits.RotateLeft32(u, 18) - - u = x5 + x4 - x6 ^= bits.RotateLeft32(u, 7) - u = x6 + x5 - x7 ^= bits.RotateLeft32(u, 9) - u = x7 + x6 - x4 ^= bits.RotateLeft32(u, 13) - u = x4 + x7 - x5 ^= bits.RotateLeft32(u, 18) - - u = x10 + x9 - x11 ^= bits.RotateLeft32(u, 7) - u = x11 + x10 - x8 ^= bits.RotateLeft32(u, 9) - u = x8 + x11 - x9 ^= bits.RotateLeft32(u, 13) - u = x9 + x8 - x10 ^= bits.RotateLeft32(u, 18) - - u = x15 + x14 - x12 ^= bits.RotateLeft32(u, 7) - u = x12 + x15 - x13 ^= bits.RotateLeft32(u, 9) - u = x13 + x12 - x14 ^= bits.RotateLeft32(u, 13) - u = x14 + x13 - x15 ^= bits.RotateLeft32(u, 18) - } - x0 += j0 - x1 += j1 - x2 += j2 - x3 += j3 - x4 += j4 - x5 += j5 - x6 += j6 - x7 += j7 - x8 += j8 - x9 += j9 - x10 += j10 - x11 += j11 - x12 += j12 - x13 += j13 - x14 += j14 - x15 += j15 - - out[0] 
= byte(x0) - out[1] = byte(x0 >> 8) - out[2] = byte(x0 >> 16) - out[3] = byte(x0 >> 24) - - out[4] = byte(x1) - out[5] = byte(x1 >> 8) - out[6] = byte(x1 >> 16) - out[7] = byte(x1 >> 24) - - out[8] = byte(x2) - out[9] = byte(x2 >> 8) - out[10] = byte(x2 >> 16) - out[11] = byte(x2 >> 24) - - out[12] = byte(x3) - out[13] = byte(x3 >> 8) - out[14] = byte(x3 >> 16) - out[15] = byte(x3 >> 24) - - out[16] = byte(x4) - out[17] = byte(x4 >> 8) - out[18] = byte(x4 >> 16) - out[19] = byte(x4 >> 24) - - out[20] = byte(x5) - out[21] = byte(x5 >> 8) - out[22] = byte(x5 >> 16) - out[23] = byte(x5 >> 24) - - out[24] = byte(x6) - out[25] = byte(x6 >> 8) - out[26] = byte(x6 >> 16) - out[27] = byte(x6 >> 24) - - out[28] = byte(x7) - out[29] = byte(x7 >> 8) - out[30] = byte(x7 >> 16) - out[31] = byte(x7 >> 24) - - out[32] = byte(x8) - out[33] = byte(x8 >> 8) - out[34] = byte(x8 >> 16) - out[35] = byte(x8 >> 24) - - out[36] = byte(x9) - out[37] = byte(x9 >> 8) - out[38] = byte(x9 >> 16) - out[39] = byte(x9 >> 24) - - out[40] = byte(x10) - out[41] = byte(x10 >> 8) - out[42] = byte(x10 >> 16) - out[43] = byte(x10 >> 24) - - out[44] = byte(x11) - out[45] = byte(x11 >> 8) - out[46] = byte(x11 >> 16) - out[47] = byte(x11 >> 24) - - out[48] = byte(x12) - out[49] = byte(x12 >> 8) - out[50] = byte(x12 >> 16) - out[51] = byte(x12 >> 24) - - out[52] = byte(x13) - out[53] = byte(x13 >> 8) - out[54] = byte(x13 >> 16) - out[55] = byte(x13 >> 24) - - out[56] = byte(x14) - out[57] = byte(x14 >> 8) - out[58] = byte(x14 >> 16) - out[59] = byte(x14 >> 24) - - out[60] = byte(x15) - out[61] = byte(x15 >> 8) - out[62] = byte(x15 >> 16) - out[63] = byte(x15 >> 24) -} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go deleted file mode 100644 index c400dfcf..00000000 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && !purego && gc -// +build amd64,!purego,gc - -package salsa - -//go:noescape - -// salsa2020XORKeyStream is implemented in salsa20_amd64.s. -func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) - -// XORKeyStream crypts bytes from in to out using the given key and counters. -// In and out must overlap entirely or not at all. Counter -// contains the raw salsa20 counter bytes (both nonce and block counter). -func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { - if len(in) == 0 { - return - } - _ = out[len(in)-1] - salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0]) -} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s deleted file mode 100644 index c0892772..00000000 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s +++ /dev/null @@ -1,881 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && !purego && gc -// +build amd64,!purego,gc - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) -// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size. 
-TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment - MOVQ out+0(FP),DI - MOVQ in+8(FP),SI - MOVQ n+16(FP),DX - MOVQ nonce+24(FP),CX - MOVQ key+32(FP),R8 - - MOVQ SP,R12 - ADDQ $31, R12 - ANDQ $~31, R12 - - MOVQ DX,R9 - MOVQ CX,DX - MOVQ R8,R10 - CMPQ R9,$0 - JBE DONE - START: - MOVL 20(R10),CX - MOVL 0(R10),R8 - MOVL 0(DX),AX - MOVL 16(R10),R11 - MOVL CX,0(R12) - MOVL R8, 4 (R12) - MOVL AX, 8 (R12) - MOVL R11, 12 (R12) - MOVL 8(DX),CX - MOVL 24(R10),R8 - MOVL 4(R10),AX - MOVL 4(DX),R11 - MOVL CX,16(R12) - MOVL R8, 20 (R12) - MOVL AX, 24 (R12) - MOVL R11, 28 (R12) - MOVL 12(DX),CX - MOVL 12(R10),DX - MOVL 28(R10),R8 - MOVL 8(R10),AX - MOVL DX,32(R12) - MOVL CX, 36 (R12) - MOVL R8, 40 (R12) - MOVL AX, 44 (R12) - MOVQ $1634760805,DX - MOVQ $857760878,CX - MOVQ $2036477234,R8 - MOVQ $1797285236,AX - MOVL DX,48(R12) - MOVL CX, 52 (R12) - MOVL R8, 56 (R12) - MOVL AX, 60 (R12) - CMPQ R9,$256 - JB BYTESBETWEEN1AND255 - MOVOA 48(R12),X0 - PSHUFL $0X55,X0,X1 - PSHUFL $0XAA,X0,X2 - PSHUFL $0XFF,X0,X3 - PSHUFL $0X00,X0,X0 - MOVOA X1,64(R12) - MOVOA X2,80(R12) - MOVOA X3,96(R12) - MOVOA X0,112(R12) - MOVOA 0(R12),X0 - PSHUFL $0XAA,X0,X1 - PSHUFL $0XFF,X0,X2 - PSHUFL $0X00,X0,X3 - PSHUFL $0X55,X0,X0 - MOVOA X1,128(R12) - MOVOA X2,144(R12) - MOVOA X3,160(R12) - MOVOA X0,176(R12) - MOVOA 16(R12),X0 - PSHUFL $0XFF,X0,X1 - PSHUFL $0X55,X0,X2 - PSHUFL $0XAA,X0,X0 - MOVOA X1,192(R12) - MOVOA X2,208(R12) - MOVOA X0,224(R12) - MOVOA 32(R12),X0 - PSHUFL $0X00,X0,X1 - PSHUFL $0XAA,X0,X2 - PSHUFL $0XFF,X0,X0 - MOVOA X1,240(R12) - MOVOA X2,256(R12) - MOVOA X0,272(R12) - BYTESATLEAST256: - MOVL 16(R12),DX - MOVL 36 (R12),CX - MOVL DX,288(R12) - MOVL CX,304(R12) - SHLQ $32,CX - ADDQ CX,DX - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 292 (R12) - MOVL CX, 308 (R12) - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 296 (R12) - MOVL CX, 312 (R12) - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 300 (R12) - MOVL CX, 316 (R12) - ADDQ $1,DX - MOVQ DX,CX - SHRQ 
$32,CX - MOVL DX,16(R12) - MOVL CX, 36 (R12) - MOVQ R9,352(R12) - MOVQ $20,DX - MOVOA 64(R12),X0 - MOVOA 80(R12),X1 - MOVOA 96(R12),X2 - MOVOA 256(R12),X3 - MOVOA 272(R12),X4 - MOVOA 128(R12),X5 - MOVOA 144(R12),X6 - MOVOA 176(R12),X7 - MOVOA 192(R12),X8 - MOVOA 208(R12),X9 - MOVOA 224(R12),X10 - MOVOA 304(R12),X11 - MOVOA 112(R12),X12 - MOVOA 160(R12),X13 - MOVOA 240(R12),X14 - MOVOA 288(R12),X15 - MAINLOOP1: - MOVOA X1,320(R12) - MOVOA X2,336(R12) - MOVOA X13,X1 - PADDL X12,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X14 - PSRLL $25,X2 - PXOR X2,X14 - MOVOA X7,X1 - PADDL X0,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X11 - PSRLL $25,X2 - PXOR X2,X11 - MOVOA X12,X1 - PADDL X14,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X15 - PSRLL $23,X2 - PXOR X2,X15 - MOVOA X0,X1 - PADDL X11,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X9 - PSRLL $23,X2 - PXOR X2,X9 - MOVOA X14,X1 - PADDL X15,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X13 - PSRLL $19,X2 - PXOR X2,X13 - MOVOA X11,X1 - PADDL X9,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X7 - PSRLL $19,X2 - PXOR X2,X7 - MOVOA X15,X1 - PADDL X13,X1 - MOVOA X1,X2 - PSLLL $18,X1 - PXOR X1,X12 - PSRLL $14,X2 - PXOR X2,X12 - MOVOA 320(R12),X1 - MOVOA X12,320(R12) - MOVOA X9,X2 - PADDL X7,X2 - MOVOA X2,X12 - PSLLL $18,X2 - PXOR X2,X0 - PSRLL $14,X12 - PXOR X12,X0 - MOVOA X5,X2 - PADDL X1,X2 - MOVOA X2,X12 - PSLLL $7,X2 - PXOR X2,X3 - PSRLL $25,X12 - PXOR X12,X3 - MOVOA 336(R12),X2 - MOVOA X0,336(R12) - MOVOA X6,X0 - PADDL X2,X0 - MOVOA X0,X12 - PSLLL $7,X0 - PXOR X0,X4 - PSRLL $25,X12 - PXOR X12,X4 - MOVOA X1,X0 - PADDL X3,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X10 - PSRLL $23,X12 - PXOR X12,X10 - MOVOA X2,X0 - PADDL X4,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X8 - PSRLL $23,X12 - PXOR X12,X8 - MOVOA X3,X0 - PADDL X10,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X5 - PSRLL $19,X12 - PXOR X12,X5 - MOVOA X4,X0 - PADDL X8,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X6 - PSRLL $19,X12 - PXOR X12,X6 - MOVOA X10,X0 - PADDL X5,X0 - MOVOA X0,X12 - PSLLL 
$18,X0 - PXOR X0,X1 - PSRLL $14,X12 - PXOR X12,X1 - MOVOA 320(R12),X0 - MOVOA X1,320(R12) - MOVOA X4,X1 - PADDL X0,X1 - MOVOA X1,X12 - PSLLL $7,X1 - PXOR X1,X7 - PSRLL $25,X12 - PXOR X12,X7 - MOVOA X8,X1 - PADDL X6,X1 - MOVOA X1,X12 - PSLLL $18,X1 - PXOR X1,X2 - PSRLL $14,X12 - PXOR X12,X2 - MOVOA 336(R12),X12 - MOVOA X2,336(R12) - MOVOA X14,X1 - PADDL X12,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X5 - PSRLL $25,X2 - PXOR X2,X5 - MOVOA X0,X1 - PADDL X7,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X10 - PSRLL $23,X2 - PXOR X2,X10 - MOVOA X12,X1 - PADDL X5,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X8 - PSRLL $23,X2 - PXOR X2,X8 - MOVOA X7,X1 - PADDL X10,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X4 - PSRLL $19,X2 - PXOR X2,X4 - MOVOA X5,X1 - PADDL X8,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X14 - PSRLL $19,X2 - PXOR X2,X14 - MOVOA X10,X1 - PADDL X4,X1 - MOVOA X1,X2 - PSLLL $18,X1 - PXOR X1,X0 - PSRLL $14,X2 - PXOR X2,X0 - MOVOA 320(R12),X1 - MOVOA X0,320(R12) - MOVOA X8,X0 - PADDL X14,X0 - MOVOA X0,X2 - PSLLL $18,X0 - PXOR X0,X12 - PSRLL $14,X2 - PXOR X2,X12 - MOVOA X11,X0 - PADDL X1,X0 - MOVOA X0,X2 - PSLLL $7,X0 - PXOR X0,X6 - PSRLL $25,X2 - PXOR X2,X6 - MOVOA 336(R12),X2 - MOVOA X12,336(R12) - MOVOA X3,X0 - PADDL X2,X0 - MOVOA X0,X12 - PSLLL $7,X0 - PXOR X0,X13 - PSRLL $25,X12 - PXOR X12,X13 - MOVOA X1,X0 - PADDL X6,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X15 - PSRLL $23,X12 - PXOR X12,X15 - MOVOA X2,X0 - PADDL X13,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X9 - PSRLL $23,X12 - PXOR X12,X9 - MOVOA X6,X0 - PADDL X15,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X11 - PSRLL $19,X12 - PXOR X12,X11 - MOVOA X13,X0 - PADDL X9,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X3 - PSRLL $19,X12 - PXOR X12,X3 - MOVOA X15,X0 - PADDL X11,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X1 - PSRLL $14,X12 - PXOR X12,X1 - MOVOA X9,X0 - PADDL X3,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X2 - PSRLL $14,X12 - PXOR X12,X2 - MOVOA 320(R12),X12 - MOVOA 336(R12),X0 - SUBQ $2,DX - JA MAINLOOP1 - PADDL 
112(R12),X12 - PADDL 176(R12),X7 - PADDL 224(R12),X10 - PADDL 272(R12),X4 - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 0(SI),DX - XORL 4(SI),CX - XORL 8(SI),R8 - XORL 12(SI),R9 - MOVL DX,0(DI) - MOVL CX,4(DI) - MOVL R8,8(DI) - MOVL R9,12(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 64(SI),DX - XORL 68(SI),CX - XORL 72(SI),R8 - XORL 76(SI),R9 - MOVL DX,64(DI) - MOVL CX,68(DI) - MOVL R8,72(DI) - MOVL R9,76(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 128(SI),DX - XORL 132(SI),CX - XORL 136(SI),R8 - XORL 140(SI),R9 - MOVL DX,128(DI) - MOVL CX,132(DI) - MOVL R8,136(DI) - MOVL R9,140(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - XORL 192(SI),DX - XORL 196(SI),CX - XORL 200(SI),R8 - XORL 204(SI),R9 - MOVL DX,192(DI) - MOVL CX,196(DI) - MOVL R8,200(DI) - MOVL R9,204(DI) - PADDL 240(R12),X14 - PADDL 64(R12),X0 - PADDL 128(R12),X5 - PADDL 192(R12),X8 - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 16(SI),DX - XORL 20(SI),CX - XORL 24(SI),R8 - XORL 28(SI),R9 - MOVL DX,16(DI) - MOVL CX,20(DI) - MOVL R8,24(DI) - MOVL R9,28(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 80(SI),DX - XORL 84(SI),CX - XORL 88(SI),R8 - XORL 92(SI),R9 - MOVL DX,80(DI) - MOVL CX,84(DI) - MOVL R8,88(DI) - MOVL R9,92(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 144(SI),DX - XORL 148(SI),CX - XORL 152(SI),R8 - XORL 156(SI),R9 - MOVL DX,144(DI) - MOVL CX,148(DI) - MOVL 
R8,152(DI) - MOVL R9,156(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - XORL 208(SI),DX - XORL 212(SI),CX - XORL 216(SI),R8 - XORL 220(SI),R9 - MOVL DX,208(DI) - MOVL CX,212(DI) - MOVL R8,216(DI) - MOVL R9,220(DI) - PADDL 288(R12),X15 - PADDL 304(R12),X11 - PADDL 80(R12),X1 - PADDL 144(R12),X6 - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 32(SI),DX - XORL 36(SI),CX - XORL 40(SI),R8 - XORL 44(SI),R9 - MOVL DX,32(DI) - MOVL CX,36(DI) - MOVL R8,40(DI) - MOVL R9,44(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 96(SI),DX - XORL 100(SI),CX - XORL 104(SI),R8 - XORL 108(SI),R9 - MOVL DX,96(DI) - MOVL CX,100(DI) - MOVL R8,104(DI) - MOVL R9,108(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 160(SI),DX - XORL 164(SI),CX - XORL 168(SI),R8 - XORL 172(SI),R9 - MOVL DX,160(DI) - MOVL CX,164(DI) - MOVL R8,168(DI) - MOVL R9,172(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - XORL 224(SI),DX - XORL 228(SI),CX - XORL 232(SI),R8 - XORL 236(SI),R9 - MOVL DX,224(DI) - MOVL CX,228(DI) - MOVL R8,232(DI) - MOVL R9,236(DI) - PADDL 160(R12),X13 - PADDL 208(R12),X9 - PADDL 256(R12),X3 - PADDL 96(R12),X2 - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 48(SI),DX - XORL 52(SI),CX - XORL 56(SI),R8 - XORL 60(SI),R9 - MOVL DX,48(DI) - MOVL CX,52(DI) - MOVL R8,56(DI) - MOVL R9,60(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 112(SI),DX - XORL 116(SI),CX - XORL 120(SI),R8 - XORL 124(SI),R9 - MOVL DX,112(DI) - MOVL CX,116(DI) - MOVL R8,120(DI) - MOVL R9,124(DI) - MOVD 
X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 176(SI),DX - XORL 180(SI),CX - XORL 184(SI),R8 - XORL 188(SI),R9 - MOVL DX,176(DI) - MOVL CX,180(DI) - MOVL R8,184(DI) - MOVL R9,188(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - XORL 240(SI),DX - XORL 244(SI),CX - XORL 248(SI),R8 - XORL 252(SI),R9 - MOVL DX,240(DI) - MOVL CX,244(DI) - MOVL R8,248(DI) - MOVL R9,252(DI) - MOVQ 352(R12),R9 - SUBQ $256,R9 - ADDQ $256,SI - ADDQ $256,DI - CMPQ R9,$256 - JAE BYTESATLEAST256 - CMPQ R9,$0 - JBE DONE - BYTESBETWEEN1AND255: - CMPQ R9,$64 - JAE NOCOPY - MOVQ DI,DX - LEAQ 360(R12),DI - MOVQ R9,CX - REP; MOVSB - LEAQ 360(R12),DI - LEAQ 360(R12),SI - NOCOPY: - MOVQ R9,352(R12) - MOVOA 48(R12),X0 - MOVOA 0(R12),X1 - MOVOA 16(R12),X2 - MOVOA 32(R12),X3 - MOVOA X1,X4 - MOVQ $20,CX - MAINLOOP2: - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X3 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X3,X3 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X1 - PSHUFL $0X4E,X2,X2 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X1,X1 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X1 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X1,X1 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X3 - PSHUFL $0X4E,X2,X2 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X3,X3 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X3 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X3,X3 - 
PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X1 - PSHUFL $0X4E,X2,X2 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X1,X1 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X1 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X1,X1 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X3 - PSHUFL $0X4E,X2,X2 - PXOR X6,X3 - SUBQ $4,CX - PADDL X3,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PXOR X7,X7 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X3,X3 - PXOR X6,X0 - JA MAINLOOP2 - PADDL 48(R12),X0 - PADDL 0(R12),X1 - PADDL 16(R12),X2 - PADDL 32(R12),X3 - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 0(SI),CX - XORL 48(SI),R8 - XORL 32(SI),R9 - XORL 16(SI),AX - MOVL CX,0(DI) - MOVL R8,48(DI) - MOVL R9,32(DI) - MOVL AX,16(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 20(SI),CX - XORL 4(SI),R8 - XORL 52(SI),R9 - XORL 36(SI),AX - MOVL CX,20(DI) - MOVL R8,4(DI) - MOVL R9,52(DI) - MOVL AX,36(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 40(SI),CX - XORL 24(SI),R8 - XORL 8(SI),R9 - XORL 56(SI),AX - MOVL CX,40(DI) - MOVL R8,24(DI) - MOVL R9,8(DI) - MOVL AX,56(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - XORL 60(SI),CX - XORL 44(SI),R8 - XORL 28(SI),R9 - XORL 12(SI),AX - MOVL CX,60(DI) - MOVL R8,44(DI) - MOVL R9,28(DI) - MOVL AX,12(DI) - MOVQ 352(R12),R9 - MOVL 16(R12),CX - MOVL 36 (R12),R8 - ADDQ $1,CX - SHLQ $32,R8 - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $32,R8 - MOVL CX,16(R12) - MOVL R8, 36 (R12) - CMPQ R9,$64 - JA 
BYTESATLEAST65 - JAE BYTESATLEAST64 - MOVQ DI,SI - MOVQ DX,DI - MOVQ R9,CX - REP; MOVSB - BYTESATLEAST64: - DONE: - RET - BYTESATLEAST65: - SUBQ $64,R9 - ADDQ $64,DI - ADDQ $64,SI - JMP BYTESBETWEEN1AND255 diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go deleted file mode 100644 index 4392cc1a..00000000 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || purego || !gc -// +build !amd64 purego !gc - -package salsa - -// XORKeyStream crypts bytes from in to out using the given key and counters. -// In and out must overlap entirely or not at all. Counter -// contains the raw salsa20 counter bytes (both nonce and block counter). -func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { - genericXORKeyStream(out, in, counter, key) -} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go deleted file mode 100644 index e5cdb9a2..00000000 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package salsa - -import "math/bits" - -const rounds = 20 - -// core applies the Salsa20 core function to 16-byte input in, 32-byte key k, -// and 16-byte constant c, and puts the result into 64-byte array out. 
-func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { - j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 - j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 - j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 - j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 - j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 - j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 - j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 - j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 - j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 - j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 - j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 - j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 - j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 - j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 - j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 - - x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 - x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 - - for i := 0; i < rounds; i += 2 { - u := x0 + x12 - x4 ^= bits.RotateLeft32(u, 7) - u = x4 + x0 - x8 ^= bits.RotateLeft32(u, 9) - u = x8 + x4 - x12 ^= bits.RotateLeft32(u, 13) - u = x12 + x8 - x0 ^= bits.RotateLeft32(u, 18) - - u = x5 + x1 - x9 ^= bits.RotateLeft32(u, 7) - u = x9 + x5 - x13 ^= bits.RotateLeft32(u, 9) - u = x13 + x9 - x1 ^= bits.RotateLeft32(u, 13) - u = x1 + x13 - x5 ^= bits.RotateLeft32(u, 18) - - u = x10 + x6 - x14 ^= bits.RotateLeft32(u, 7) - u = x14 + x10 - x2 ^= 
bits.RotateLeft32(u, 9) - u = x2 + x14 - x6 ^= bits.RotateLeft32(u, 13) - u = x6 + x2 - x10 ^= bits.RotateLeft32(u, 18) - - u = x15 + x11 - x3 ^= bits.RotateLeft32(u, 7) - u = x3 + x15 - x7 ^= bits.RotateLeft32(u, 9) - u = x7 + x3 - x11 ^= bits.RotateLeft32(u, 13) - u = x11 + x7 - x15 ^= bits.RotateLeft32(u, 18) - - u = x0 + x3 - x1 ^= bits.RotateLeft32(u, 7) - u = x1 + x0 - x2 ^= bits.RotateLeft32(u, 9) - u = x2 + x1 - x3 ^= bits.RotateLeft32(u, 13) - u = x3 + x2 - x0 ^= bits.RotateLeft32(u, 18) - - u = x5 + x4 - x6 ^= bits.RotateLeft32(u, 7) - u = x6 + x5 - x7 ^= bits.RotateLeft32(u, 9) - u = x7 + x6 - x4 ^= bits.RotateLeft32(u, 13) - u = x4 + x7 - x5 ^= bits.RotateLeft32(u, 18) - - u = x10 + x9 - x11 ^= bits.RotateLeft32(u, 7) - u = x11 + x10 - x8 ^= bits.RotateLeft32(u, 9) - u = x8 + x11 - x9 ^= bits.RotateLeft32(u, 13) - u = x9 + x8 - x10 ^= bits.RotateLeft32(u, 18) - - u = x15 + x14 - x12 ^= bits.RotateLeft32(u, 7) - u = x12 + x15 - x13 ^= bits.RotateLeft32(u, 9) - u = x13 + x12 - x14 ^= bits.RotateLeft32(u, 13) - u = x14 + x13 - x15 ^= bits.RotateLeft32(u, 18) - } - x0 += j0 - x1 += j1 - x2 += j2 - x3 += j3 - x4 += j4 - x5 += j5 - x6 += j6 - x7 += j7 - x8 += j8 - x9 += j9 - x10 += j10 - x11 += j11 - x12 += j12 - x13 += j13 - x14 += j14 - x15 += j15 - - out[0] = byte(x0) - out[1] = byte(x0 >> 8) - out[2] = byte(x0 >> 16) - out[3] = byte(x0 >> 24) - - out[4] = byte(x1) - out[5] = byte(x1 >> 8) - out[6] = byte(x1 >> 16) - out[7] = byte(x1 >> 24) - - out[8] = byte(x2) - out[9] = byte(x2 >> 8) - out[10] = byte(x2 >> 16) - out[11] = byte(x2 >> 24) - - out[12] = byte(x3) - out[13] = byte(x3 >> 8) - out[14] = byte(x3 >> 16) - out[15] = byte(x3 >> 24) - - out[16] = byte(x4) - out[17] = byte(x4 >> 8) - out[18] = byte(x4 >> 16) - out[19] = byte(x4 >> 24) - - out[20] = byte(x5) - out[21] = byte(x5 >> 8) - out[22] = byte(x5 >> 16) - out[23] = byte(x5 >> 24) - - out[24] = byte(x6) - out[25] = byte(x6 >> 8) - out[26] = byte(x6 >> 16) - out[27] = byte(x6 >> 24) - - out[28] 
= byte(x7) - out[29] = byte(x7 >> 8) - out[30] = byte(x7 >> 16) - out[31] = byte(x7 >> 24) - - out[32] = byte(x8) - out[33] = byte(x8 >> 8) - out[34] = byte(x8 >> 16) - out[35] = byte(x8 >> 24) - - out[36] = byte(x9) - out[37] = byte(x9 >> 8) - out[38] = byte(x9 >> 16) - out[39] = byte(x9 >> 24) - - out[40] = byte(x10) - out[41] = byte(x10 >> 8) - out[42] = byte(x10 >> 16) - out[43] = byte(x10 >> 24) - - out[44] = byte(x11) - out[45] = byte(x11 >> 8) - out[46] = byte(x11 >> 16) - out[47] = byte(x11 >> 24) - - out[48] = byte(x12) - out[49] = byte(x12 >> 8) - out[50] = byte(x12 >> 16) - out[51] = byte(x12 >> 24) - - out[52] = byte(x13) - out[53] = byte(x13 >> 8) - out[54] = byte(x13 >> 16) - out[55] = byte(x13 >> 24) - - out[56] = byte(x14) - out[57] = byte(x14 >> 8) - out[58] = byte(x14 >> 16) - out[59] = byte(x14 >> 24) - - out[60] = byte(x15) - out[61] = byte(x15 >> 8) - out[62] = byte(x15 >> 16) - out[63] = byte(x15 >> 24) -} - -// genericXORKeyStream is the generic implementation of XORKeyStream to be used -// when no assembly implementation is available. -func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { - var block [64]byte - var counterCopy [16]byte - copy(counterCopy[:], counter[:]) - - for len(in) >= 64 { - core(&block, &counterCopy, key, &Sigma) - for i, x := range block { - out[i] = in[i] ^ x - } - u := uint32(1) - for i := 8; i < 16; i++ { - u += uint32(counterCopy[i]) - counterCopy[i] = byte(u) - u >>= 8 - } - in = in[64:] - out = out[64:] - } - - if len(in) > 0 { - core(&block, &counterCopy, key, &Sigma) - for i, v := range in { - out[i] = v ^ block[i] - } - } -} diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go deleted file mode 100644 index 1ab07d07..00000000 --- a/vendor/golang.org/x/crypto/ssh/buffer.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "io" - "sync" -) - -// buffer provides a linked list buffer for data exchange -// between producer and consumer. Theoretically the buffer is -// of unlimited capacity as it does no allocation of its own. -type buffer struct { - // protects concurrent access to head, tail and closed - *sync.Cond - - head *element // the buffer that will be read first - tail *element // the buffer that will be read last - - closed bool -} - -// An element represents a single link in a linked list. -type element struct { - buf []byte - next *element -} - -// newBuffer returns an empty buffer that is not closed. -func newBuffer() *buffer { - e := new(element) - b := &buffer{ - Cond: newCond(), - head: e, - tail: e, - } - return b -} - -// write makes buf available for Read to receive. -// buf must not be modified after the call to write. -func (b *buffer) write(buf []byte) { - b.Cond.L.Lock() - e := &element{buf: buf} - b.tail.next = e - b.tail = e - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// eof closes the buffer. Reads from the buffer once all -// the data has been consumed will receive io.EOF. -func (b *buffer) eof() { - b.Cond.L.Lock() - b.closed = true - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// Read reads data from the internal buffer in buf. Reads will block -// if no data is available, or until the buffer is closed. 
-func (b *buffer) Read(buf []byte) (n int, err error) { - b.Cond.L.Lock() - defer b.Cond.L.Unlock() - - for len(buf) > 0 { - // if there is data in b.head, copy it - if len(b.head.buf) > 0 { - r := copy(buf, b.head.buf) - buf, b.head.buf = buf[r:], b.head.buf[r:] - n += r - continue - } - // if there is a next buffer, make it the head - if len(b.head.buf) == 0 && b.head != b.tail { - b.head = b.head.next - continue - } - - // if at least one byte has been copied, return - if n > 0 { - break - } - - // if nothing was read, and there is nothing outstanding - // check to see if the buffer is closed. - if b.closed { - err = io.EOF - break - } - // out of buffers, wait for producer - b.Cond.Wait() - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go deleted file mode 100644 index fc04d03e..00000000 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ /dev/null @@ -1,589 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "sort" - "time" -) - -// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear -// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms. -// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't -// appear in the Signature.Format field. 
-const ( - CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" - CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" - CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" - CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" - CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" - - // CertAlgoRSASHA256v01 and CertAlgoRSASHA512v01 can't appear as a - // Certificate.Type (or PublicKey.Type), but only in - // ClientConfig.HostKeyAlgorithms. - CertAlgoRSASHA256v01 = "rsa-sha2-256-cert-v01@openssh.com" - CertAlgoRSASHA512v01 = "rsa-sha2-512-cert-v01@openssh.com" -) - -const ( - // Deprecated: use CertAlgoRSAv01. - CertSigAlgoRSAv01 = CertAlgoRSAv01 - // Deprecated: use CertAlgoRSASHA256v01. - CertSigAlgoRSASHA2256v01 = CertAlgoRSASHA256v01 - // Deprecated: use CertAlgoRSASHA512v01. - CertSigAlgoRSASHA2512v01 = CertAlgoRSASHA512v01 -) - -// Certificate types distinguish between host and user -// certificates. The values can be set in the CertType field of -// Certificate. -const ( - UserCert = 1 - HostCert = 2 -) - -// Signature represents a cryptographic signature. -type Signature struct { - Format string - Blob []byte - Rest []byte `ssh:"rest"` -} - -// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that -// a certificate does not expire. -const CertTimeInfinity = 1<<64 - 1 - -// An Certificate represents an OpenSSH certificate as defined in -// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the -// PublicKey interface, so it can be unmarshaled using -// ParsePublicKey. 
-type Certificate struct { - Nonce []byte - Key PublicKey - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []string - ValidAfter uint64 - ValidBefore uint64 - Permissions - Reserved []byte - SignatureKey PublicKey - Signature *Signature -} - -// genericCertData holds the key-independent part of the certificate data. -// Overall, certificates contain an nonce, public key fields and -// key-independent fields. -type genericCertData struct { - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []byte - ValidAfter uint64 - ValidBefore uint64 - CriticalOptions []byte - Extensions []byte - Reserved []byte - SignatureKey []byte - Signature []byte -} - -func marshalStringList(namelist []string) []byte { - var to []byte - for _, name := range namelist { - s := struct{ N string }{name} - to = append(to, Marshal(&s)...) - } - return to -} - -type optionsTuple struct { - Key string - Value []byte -} - -type optionsTupleValue struct { - Value string -} - -// serialize a map of critical options or extensions -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty string value -func marshalTuples(tups map[string]string) []byte { - keys := make([]string, 0, len(tups)) - for key := range tups { - keys = append(keys, key) - } - sort.Strings(keys) - - var ret []byte - for _, key := range keys { - s := optionsTuple{Key: key} - if value := tups[key]; len(value) > 0 { - s.Value = Marshal(&optionsTupleValue{value}) - } - ret = append(ret, Marshal(&s)...) 
- } - return ret -} - -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty option value -func parseTuples(in []byte) (map[string]string, error) { - tups := map[string]string{} - var lastKey string - var haveLastKey bool - - for len(in) > 0 { - var key, val, extra []byte - var ok bool - - if key, in, ok = parseString(in); !ok { - return nil, errShortRead - } - keyStr := string(key) - // according to [PROTOCOL.certkeys], the names must be in - // lexical order. - if haveLastKey && keyStr <= lastKey { - return nil, fmt.Errorf("ssh: certificate options are not in lexical order") - } - lastKey, haveLastKey = keyStr, true - // the next field is a data field, which if non-empty has a string embedded - if val, in, ok = parseString(in); !ok { - return nil, errShortRead - } - if len(val) > 0 { - val, extra, ok = parseString(val) - if !ok { - return nil, errShortRead - } - if len(extra) > 0 { - return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") - } - tups[keyStr] = string(val) - } else { - tups[keyStr] = "" - } - } - return tups, nil -} - -func parseCert(in []byte, privAlgo string) (*Certificate, error) { - nonce, rest, ok := parseString(in) - if !ok { - return nil, errShortRead - } - - key, rest, err := parsePubKey(rest, privAlgo) - if err != nil { - return nil, err - } - - var g genericCertData - if err := Unmarshal(rest, &g); err != nil { - return nil, err - } - - c := &Certificate{ - Nonce: nonce, - Key: key, - Serial: g.Serial, - CertType: g.CertType, - KeyId: g.KeyId, - ValidAfter: g.ValidAfter, - ValidBefore: g.ValidBefore, - } - - for principals := g.ValidPrincipals; len(principals) > 0; { - principal, rest, ok := parseString(principals) - if !ok { - return nil, errShortRead - } - c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) - principals = rest - } - - c.CriticalOptions, err = parseTuples(g.CriticalOptions) - if err != nil { - return nil, err - } - 
c.Extensions, err = parseTuples(g.Extensions) - if err != nil { - return nil, err - } - c.Reserved = g.Reserved - k, err := ParsePublicKey(g.SignatureKey) - if err != nil { - return nil, err - } - - c.SignatureKey = k - c.Signature, rest, ok = parseSignatureBody(g.Signature) - if !ok || len(rest) > 0 { - return nil, errors.New("ssh: signature parse error") - } - - return c, nil -} - -type openSSHCertSigner struct { - pub *Certificate - signer Signer -} - -type algorithmOpenSSHCertSigner struct { - *openSSHCertSigner - algorithmSigner AlgorithmSigner -} - -// NewCertSigner returns a Signer that signs with the given Certificate, whose -// private key is held by signer. It returns an error if the public key in cert -// doesn't match the key used by signer. -func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { - if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) { - return nil, errors.New("ssh: signer and cert have different public key") - } - - if algorithmSigner, ok := signer.(AlgorithmSigner); ok { - return &algorithmOpenSSHCertSigner{ - &openSSHCertSigner{cert, signer}, algorithmSigner}, nil - } else { - return &openSSHCertSigner{cert, signer}, nil - } -} - -func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.signer.Sign(rand, data) -} - -func (s *openSSHCertSigner) PublicKey() PublicKey { - return s.pub -} - -func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm) -} - -const sourceAddressCriticalOption = "source-address" - -// CertChecker does the work of verifying a certificate. Its methods -// can be plugged into ClientConfig.HostKeyCallback and -// ServerConfig.PublicKeyCallback. For the CertChecker to work, -// minimally, the IsAuthority callback should be set. 
-type CertChecker struct { - // SupportedCriticalOptions lists the CriticalOptions that the - // server application layer understands. These are only used - // for user certificates. - SupportedCriticalOptions []string - - // IsUserAuthority should return true if the key is recognized as an - // authority for the given user certificate. This allows for - // certificates to be signed by other certificates. This must be set - // if this CertChecker will be checking user certificates. - IsUserAuthority func(auth PublicKey) bool - - // IsHostAuthority should report whether the key is recognized as - // an authority for this host. This allows for certificates to be - // signed by other keys, and for those other keys to only be valid - // signers for particular hostnames. This must be set if this - // CertChecker will be checking host certificates. - IsHostAuthority func(auth PublicKey, address string) bool - - // Clock is used for verifying time stamps. If nil, time.Now - // is used. - Clock func() time.Time - - // UserKeyFallback is called when CertChecker.Authenticate encounters a - // public key that is not a certificate. It must implement validation - // of user keys or else, if nil, all such keys are rejected. - UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // HostKeyFallback is called when CertChecker.CheckHostKey encounters a - // public key that is not a certificate. It must implement host key - // validation or else, if nil, all such keys are rejected. - HostKeyFallback HostKeyCallback - - // IsRevoked is called for each certificate so that revocation checking - // can be implemented. It should return true if the given certificate - // is revoked and false otherwise. If nil, no certificates are - // considered to have been revoked. - IsRevoked func(cert *Certificate) bool -} - -// CheckHostKey checks a host key certificate. This method can be -// plugged into ClientConfig.HostKeyCallback. 
-func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { - cert, ok := key.(*Certificate) - if !ok { - if c.HostKeyFallback != nil { - return c.HostKeyFallback(addr, remote, key) - } - return errors.New("ssh: non-certificate host key") - } - if cert.CertType != HostCert { - return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) - } - if !c.IsHostAuthority(cert.SignatureKey, addr) { - return fmt.Errorf("ssh: no authorities for hostname: %v", addr) - } - - hostname, _, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - // Pass hostname only as principal for host certificates (consistent with OpenSSH) - return c.CheckCert(hostname, cert) -} - -// Authenticate checks a user certificate. Authenticate can be used as -// a value for ServerConfig.PublicKeyCallback. -func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { - cert, ok := pubKey.(*Certificate) - if !ok { - if c.UserKeyFallback != nil { - return c.UserKeyFallback(conn, pubKey) - } - return nil, errors.New("ssh: normal key pairs not accepted") - } - - if cert.CertType != UserCert { - return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) - } - if !c.IsUserAuthority(cert.SignatureKey) { - return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") - } - - if err := c.CheckCert(conn.User(), cert); err != nil { - return nil, err - } - - return &cert.Permissions, nil -} - -// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and -// the signature of the certificate. 
-func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { - if c.IsRevoked != nil && c.IsRevoked(cert) { - return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) - } - - for opt := range cert.CriticalOptions { - // sourceAddressCriticalOption will be enforced by - // serverAuthenticate - if opt == sourceAddressCriticalOption { - continue - } - - found := false - for _, supp := range c.SupportedCriticalOptions { - if supp == opt { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) - } - } - - if len(cert.ValidPrincipals) > 0 { - // By default, certs are valid for all users/hosts. - found := false - for _, p := range cert.ValidPrincipals { - if p == principal { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) - } - } - - clock := c.Clock - if clock == nil { - clock = time.Now - } - - unixNow := clock().Unix() - if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { - return fmt.Errorf("ssh: cert is not yet valid") - } - if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { - return fmt.Errorf("ssh: cert has expired") - } - if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { - return fmt.Errorf("ssh: certificate signature does not verify") - } - - return nil -} - -// SignCert signs the certificate with an authority, setting the Nonce, -// SignatureKey, and Signature fields. -func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { - c.Nonce = make([]byte, 32) - if _, err := io.ReadFull(rand, c.Nonce); err != nil { - return err - } - c.SignatureKey = authority.PublicKey() - - // Default to KeyAlgoRSASHA512 for ssh-rsa signers. 
- if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { - sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512) - if err != nil { - return err - } - c.Signature = sig - return nil - } - - sig, err := authority.Sign(rand, c.bytesForSigning()) - if err != nil { - return err - } - c.Signature = sig - return nil -} - -// certKeyAlgoNames is a mapping from known certificate algorithm names to the -// corresponding public key signature algorithm. -// -// This map must be kept in sync with the one in agent/client.go. -var certKeyAlgoNames = map[string]string{ - CertAlgoRSAv01: KeyAlgoRSA, - CertAlgoRSASHA256v01: KeyAlgoRSASHA256, - CertAlgoRSASHA512v01: KeyAlgoRSASHA512, - CertAlgoDSAv01: KeyAlgoDSA, - CertAlgoECDSA256v01: KeyAlgoECDSA256, - CertAlgoECDSA384v01: KeyAlgoECDSA384, - CertAlgoECDSA521v01: KeyAlgoECDSA521, - CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256, - CertAlgoED25519v01: KeyAlgoED25519, - CertAlgoSKED25519v01: KeyAlgoSKED25519, -} - -// underlyingAlgo returns the signature algorithm associated with algo (which is -// an advertised or negotiated public key or host key algorithm). These are -// usually the same, except for certificate algorithms. -func underlyingAlgo(algo string) string { - if a, ok := certKeyAlgoNames[algo]; ok { - return a - } - return algo -} - -// certificateAlgo returns the certificate algorithms that uses the provided -// underlying signature algorithm. -func certificateAlgo(algo string) (certAlgo string, ok bool) { - for certName, algoName := range certKeyAlgoNames { - if algoName == algo { - return certName, true - } - } - return "", false -} - -func (cert *Certificate) bytesForSigning() []byte { - c2 := *cert - c2.Signature = nil - out := c2.Marshal() - // Drop trailing signature length. - return out[:len(out)-4] -} - -// Marshal serializes c into OpenSSH's wire format. It is part of the -// PublicKey interface. 
-func (c *Certificate) Marshal() []byte { - generic := genericCertData{ - Serial: c.Serial, - CertType: c.CertType, - KeyId: c.KeyId, - ValidPrincipals: marshalStringList(c.ValidPrincipals), - ValidAfter: uint64(c.ValidAfter), - ValidBefore: uint64(c.ValidBefore), - CriticalOptions: marshalTuples(c.CriticalOptions), - Extensions: marshalTuples(c.Extensions), - Reserved: c.Reserved, - SignatureKey: c.SignatureKey.Marshal(), - } - if c.Signature != nil { - generic.Signature = Marshal(c.Signature) - } - genericBytes := Marshal(&generic) - keyBytes := c.Key.Marshal() - _, keyBytes, _ = parseString(keyBytes) - prefix := Marshal(&struct { - Name string - Nonce []byte - Key []byte `ssh:"rest"` - }{c.Type(), c.Nonce, keyBytes}) - - result := make([]byte, 0, len(prefix)+len(genericBytes)) - result = append(result, prefix...) - result = append(result, genericBytes...) - return result -} - -// Type returns the certificate algorithm name. It is part of the PublicKey interface. -func (c *Certificate) Type() string { - certName, ok := certificateAlgo(c.Key.Type()) - if !ok { - panic("unknown certificate type for key type " + c.Key.Type()) - } - return certName -} - -// Verify verifies a signature against the certificate's public -// key. It is part of the PublicKey interface. 
-func (c *Certificate) Verify(data []byte, sig *Signature) error { - return c.Key.Verify(data, sig) -} - -func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { - format, in, ok := parseString(in) - if !ok { - return - } - - out = &Signature{ - Format: string(format), - } - - if out.Blob, in, ok = parseString(in); !ok { - return - } - - switch out.Format { - case KeyAlgoSKECDSA256, CertAlgoSKECDSA256v01, KeyAlgoSKED25519, CertAlgoSKED25519v01: - out.Rest = in - return out, nil, ok - } - - return out, in, ok -} - -func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { - sigBytes, rest, ok := parseString(in) - if !ok { - return - } - - out, trailing, ok := parseSignatureBody(sigBytes) - if !ok || len(trailing) > 0 { - return nil, nil, false - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go deleted file mode 100644 index c0834c00..00000000 --- a/vendor/golang.org/x/crypto/ssh/channel.go +++ /dev/null @@ -1,633 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "sync" -) - -const ( - minPacketLength = 9 - // channelMaxPacket contains the maximum number of bytes that will be - // sent in a single packet. As per RFC 4253, section 6.1, 32k is also - // the minimum. - channelMaxPacket = 1 << 15 - // We follow OpenSSH here. - channelWindowSize = 64 * channelMaxPacket -) - -// NewChannel represents an incoming request to a channel. It must either be -// accepted for use by calling Accept, or rejected by calling Reject. -type NewChannel interface { - // Accept accepts the channel creation request. It returns the Channel - // and a Go channel containing SSH requests. The Go channel must be - // serviced otherwise the Channel will hang. 
- Accept() (Channel, <-chan *Request, error) - - // Reject rejects the channel creation request. After calling - // this, no other methods on the Channel may be called. - Reject(reason RejectionReason, message string) error - - // ChannelType returns the type of the channel, as supplied by the - // client. - ChannelType() string - - // ExtraData returns the arbitrary payload for this channel, as supplied - // by the client. This data is specific to the channel type. - ExtraData() []byte -} - -// A Channel is an ordered, reliable, flow-controlled, duplex stream -// that is multiplexed over an SSH connection. -type Channel interface { - // Read reads up to len(data) bytes from the channel. - Read(data []byte) (int, error) - - // Write writes len(data) bytes to the channel. - Write(data []byte) (int, error) - - // Close signals end of channel use. No data may be sent after this - // call. - Close() error - - // CloseWrite signals the end of sending in-band - // data. Requests may still be sent, and the other side may - // still send data - CloseWrite() error - - // SendRequest sends a channel request. If wantReply is true, - // it will wait for a reply and return the result as a - // boolean, otherwise the return value will be false. Channel - // requests are out-of-band messages so they may be sent even - // if the data stream is closed or blocked by flow control. - // If the channel is closed before a reply is returned, io.EOF - // is returned. - SendRequest(name string, wantReply bool, payload []byte) (bool, error) - - // Stderr returns an io.ReadWriter that writes to this channel - // with the extended data type set to stderr. Stderr may - // safely be read and written from a different goroutine than - // Read and Write respectively. - Stderr() io.ReadWriter -} - -// Request is a request sent outside of the normal stream of -// data. Requests can either be specific to an SSH channel, or they -// can be global. 
-type Request struct { - Type string - WantReply bool - Payload []byte - - ch *channel - mux *mux -} - -// Reply sends a response to a request. It must be called for all requests -// where WantReply is true and is a no-op otherwise. The payload argument is -// ignored for replies to channel-specific requests. -func (r *Request) Reply(ok bool, payload []byte) error { - if !r.WantReply { - return nil - } - - if r.ch == nil { - return r.mux.ackRequest(ok, payload) - } - - return r.ch.ackRequest(ok) -} - -// RejectionReason is an enumeration used when rejecting channel creation -// requests. See RFC 4254, section 5.1. -type RejectionReason uint32 - -const ( - Prohibited RejectionReason = iota + 1 - ConnectionFailed - UnknownChannelType - ResourceShortage -) - -// String converts the rejection reason to human readable form. -func (r RejectionReason) String() string { - switch r { - case Prohibited: - return "administratively prohibited" - case ConnectionFailed: - return "connect failed" - case UnknownChannelType: - return "unknown channel type" - case ResourceShortage: - return "resource shortage" - } - return fmt.Sprintf("unknown reason %d", int(r)) -} - -func min(a uint32, b int) uint32 { - if a < uint32(b) { - return a - } - return uint32(b) -} - -type channelDirection uint8 - -const ( - channelInbound channelDirection = iota - channelOutbound -) - -// channel is an implementation of the Channel interface that works -// with the mux class. -type channel struct { - // R/O after creation - chanType string - extraData []byte - localId, remoteId uint32 - - // maxIncomingPayload and maxRemotePayload are the maximum - // payload sizes of normal and extended data packets for - // receiving and sending, respectively. The wire packet will - // be 9 or 13 bytes larger (excluding encryption overhead). 
- maxIncomingPayload uint32 - maxRemotePayload uint32 - - mux *mux - - // decided is set to true if an accept or reject message has been sent - // (for outbound channels) or received (for inbound channels). - decided bool - - // direction contains either channelOutbound, for channels created - // locally, or channelInbound, for channels created by the peer. - direction channelDirection - - // Pending internal channel messages. - msg chan interface{} - - // Since requests have no ID, there can be only one request - // with WantReply=true outstanding. This lock is held by a - // goroutine that has such an outgoing request pending. - sentRequestMu sync.Mutex - - incomingRequests chan *Request - - sentEOF bool - - // thread-safe data - remoteWin window - pending *buffer - extPending *buffer - - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 - - // writeMu serializes calls to mux.conn.writePacket() and - // protects sentClose and packetPool. This mutex must be - // different from windowMu, as writePacket can block if there - // is a key exchange pending. - writeMu sync.Mutex - sentClose bool - - // packetPool has a buffer for each extended channel ID to - // save allocations during writes. - packetPool map[uint32][]byte -} - -// writePacket sends a packet. If the packet is a channel close, it updates -// sentClose. This method takes the lock c.writeMu. -func (ch *channel) writePacket(packet []byte) error { - ch.writeMu.Lock() - if ch.sentClose { - ch.writeMu.Unlock() - return io.EOF - } - ch.sentClose = (packet[0] == msgChannelClose) - err := ch.mux.conn.writePacket(packet) - ch.writeMu.Unlock() - return err -} - -func (ch *channel) sendMessage(msg interface{}) error { - if debugMux { - log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) - } - - p := Marshal(msg) - binary.BigEndian.PutUint32(p[1:], ch.remoteId) - return ch.writePacket(p) -} - -// WriteExtended writes data to a specific extended stream. 
These streams are -// used, for example, for stderr. -func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { - if ch.sentEOF { - return 0, io.EOF - } - // 1 byte message type, 4 bytes remoteId, 4 bytes data length - opCode := byte(msgChannelData) - headerLength := uint32(9) - if extendedCode > 0 { - headerLength += 4 - opCode = msgChannelExtendedData - } - - ch.writeMu.Lock() - packet := ch.packetPool[extendedCode] - // We don't remove the buffer from packetPool, so - // WriteExtended calls from different goroutines will be - // flagged as errors by the race detector. - ch.writeMu.Unlock() - - for len(data) > 0 { - space := min(ch.maxRemotePayload, len(data)) - if space, err = ch.remoteWin.reserve(space); err != nil { - return n, err - } - if want := headerLength + space; uint32(cap(packet)) < want { - packet = make([]byte, want) - } else { - packet = packet[:want] - } - - todo := data[:space] - - packet[0] = opCode - binary.BigEndian.PutUint32(packet[1:], ch.remoteId) - if extendedCode > 0 { - binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) - } - binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) - copy(packet[headerLength:], todo) - if err = ch.writePacket(packet); err != nil { - return n, err - } - - n += len(todo) - data = data[len(todo):] - } - - ch.writeMu.Lock() - ch.packetPool[extendedCode] = packet - ch.writeMu.Unlock() - - return n, err -} - -func (ch *channel) handleData(packet []byte) error { - headerLen := 9 - isExtendedData := packet[0] == msgChannelExtendedData - if isExtendedData { - headerLen = 13 - } - if len(packet) < headerLen { - // malformed data packet - return parseError(packet[0]) - } - - var extended uint32 - if isExtendedData { - extended = binary.BigEndian.Uint32(packet[5:]) - } - - length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) - if length == 0 { - return nil - } - if length > ch.maxIncomingPayload { - // TODO(hanwen): should send Disconnect? 
- return errors.New("ssh: incoming packet exceeds maximum payload size") - } - - data := packet[headerLen:] - if length != uint32(len(data)) { - return errors.New("ssh: wrong packet length") - } - - ch.windowMu.Lock() - if ch.myWindow < length { - ch.windowMu.Unlock() - // TODO(hanwen): should send Disconnect with reason? - return errors.New("ssh: remote side wrote too much") - } - ch.myWindow -= length - ch.windowMu.Unlock() - - if extended == 1 { - ch.extPending.write(data) - } else if extended > 0 { - // discard other extended data. - } else { - ch.pending.write(data) - } - return nil -} - -func (c *channel) adjustWindow(n uint32) error { - c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. - c.myWindow += uint32(n) - c.windowMu.Unlock() - return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), - }) -} - -func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { - switch extended { - case 1: - n, err = c.extPending.Read(data) - case 0: - n, err = c.pending.Read(data) - default: - return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) - } - - if n > 0 { - err = c.adjustWindow(uint32(n)) - // sendWindowAdjust can return io.EOF if the remote - // peer has closed the connection, however we want to - // defer forwarding io.EOF to the caller of Read until - // the buffer has been drained. - if n > 0 && err == io.EOF { - err = nil - } - } - - return n, err -} - -func (c *channel) close() { - c.pending.eof() - c.extPending.eof() - close(c.msg) - close(c.incomingRequests) - c.writeMu.Lock() - // This is not necessary for a normal channel teardown, but if - // there was another error, it is. - c.sentClose = true - c.writeMu.Unlock() - // Unblock writers. 
- c.remoteWin.close() -} - -// responseMessageReceived is called when a success or failure message is -// received on a channel to check that such a message is reasonable for the -// given channel. -func (ch *channel) responseMessageReceived() error { - if ch.direction == channelInbound { - return errors.New("ssh: channel response message received on inbound channel") - } - if ch.decided { - return errors.New("ssh: duplicate response received for channel") - } - ch.decided = true - return nil -} - -func (ch *channel) handlePacket(packet []byte) error { - switch packet[0] { - case msgChannelData, msgChannelExtendedData: - return ch.handleData(packet) - case msgChannelClose: - ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) - ch.mux.chanList.remove(ch.localId) - ch.close() - return nil - case msgChannelEOF: - // RFC 4254 is mute on how EOF affects dataExt messages but - // it is logical to signal EOF at the same time. - ch.extPending.eof() - ch.pending.eof() - return nil - } - - decoded, err := decode(packet) - if err != nil { - return err - } - - switch msg := decoded.(type) { - case *channelOpenFailureMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - ch.mux.chanList.remove(msg.PeersID) - ch.msg <- msg - case *channelOpenConfirmMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) - } - ch.remoteId = msg.MyID - ch.maxRemotePayload = msg.MaxPacketSize - ch.remoteWin.add(msg.MyWindow) - ch.msg <- msg - case *windowAdjustMsg: - if !ch.remoteWin.add(msg.AdditionalBytes) { - return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) - } - case *channelRequestMsg: - req := Request{ - Type: msg.Request, - WantReply: msg.WantReply, - Payload: msg.RequestSpecificData, - ch: ch, - } - - ch.incomingRequests <- &req - default: - ch.msg <- msg - } 
- return nil -} - -func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { - ch := &channel{ - remoteWin: window{Cond: newCond()}, - myWindow: channelWindowSize, - pending: newBuffer(), - extPending: newBuffer(), - direction: direction, - incomingRequests: make(chan *Request, chanSize), - msg: make(chan interface{}, chanSize), - chanType: chanType, - extraData: extraData, - mux: m, - packetPool: make(map[uint32][]byte), - } - ch.localId = m.chanList.add(ch) - return ch -} - -var errUndecided = errors.New("ssh: must Accept or Reject channel") -var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") - -type extChannel struct { - code uint32 - ch *channel -} - -func (e *extChannel) Write(data []byte) (n int, err error) { - return e.ch.WriteExtended(data, e.code) -} - -func (e *extChannel) Read(data []byte) (n int, err error) { - return e.ch.ReadExtended(data, e.code) -} - -func (ch *channel) Accept() (Channel, <-chan *Request, error) { - if ch.decided { - return nil, nil, errDecidedAlready - } - ch.maxIncomingPayload = channelMaxPacket - confirm := channelOpenConfirmMsg{ - PeersID: ch.remoteId, - MyID: ch.localId, - MyWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - } - ch.decided = true - if err := ch.sendMessage(confirm); err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (ch *channel) Reject(reason RejectionReason, message string) error { - if ch.decided { - return errDecidedAlready - } - reject := channelOpenFailureMsg{ - PeersID: ch.remoteId, - Reason: reason, - Message: message, - Language: "en", - } - ch.decided = true - return ch.sendMessage(reject) -} - -func (ch *channel) Read(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.ReadExtended(data, 0) -} - -func (ch *channel) Write(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.WriteExtended(data, 0) -} - -func (ch 
*channel) CloseWrite() error { - if !ch.decided { - return errUndecided - } - ch.sentEOF = true - return ch.sendMessage(channelEOFMsg{ - PeersID: ch.remoteId}) -} - -func (ch *channel) Close() error { - if !ch.decided { - return errUndecided - } - - return ch.sendMessage(channelCloseMsg{ - PeersID: ch.remoteId}) -} - -// Extended returns an io.ReadWriter that sends and receives data on the given, -// SSH extended stream. Such streams are used, for example, for stderr. -func (ch *channel) Extended(code uint32) io.ReadWriter { - if !ch.decided { - return nil - } - return &extChannel{code, ch} -} - -func (ch *channel) Stderr() io.ReadWriter { - return ch.Extended(1) -} - -func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - if !ch.decided { - return false, errUndecided - } - - if wantReply { - ch.sentRequestMu.Lock() - defer ch.sentRequestMu.Unlock() - } - - msg := channelRequestMsg{ - PeersID: ch.remoteId, - Request: name, - WantReply: wantReply, - RequestSpecificData: payload, - } - - if err := ch.sendMessage(msg); err != nil { - return false, err - } - - if wantReply { - m, ok := (<-ch.msg) - if !ok { - return false, io.EOF - } - switch m.(type) { - case *channelRequestFailureMsg: - return false, nil - case *channelRequestSuccessMsg: - return true, nil - default: - return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) - } - } - - return false, nil -} - -// ackRequest either sends an ack or nack to the channel request. 
-func (ch *channel) ackRequest(ok bool) error { - if !ch.decided { - return errUndecided - } - - var msg interface{} - if !ok { - msg = channelRequestFailureMsg{ - PeersID: ch.remoteId, - } - } else { - msg = channelRequestSuccessMsg{ - PeersID: ch.remoteId, - } - } - return ch.sendMessage(msg) -} - -func (ch *channel) ChannelType() string { - return ch.chanType -} - -func (ch *channel) ExtraData() []byte { - return ch.extraData -} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go deleted file mode 100644 index 741e984f..00000000 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ /dev/null @@ -1,789 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rc4" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "hash" - "io" - - "golang.org/x/crypto/chacha20" - "golang.org/x/crypto/internal/poly1305" -) - -const ( - packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. - - // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations - // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC - // indicates implementations SHOULD be able to handle larger packet sizes, but then - // waffles on about reasonable limits. - // - // OpenSSH caps their maxPacket at 256kB so we choose to do - // the same. maxPacket is also used to ensure that uint32 - // length fields do not overflow, so it should remain well - // below 4G. - maxPacket = 256 * 1024 -) - -// noneCipher implements cipher.Stream and provides no encryption. It is used -// by the transport before the first key-exchange. 
-type noneCipher struct{} - -func (c noneCipher) XORKeyStream(dst, src []byte) { - copy(dst, src) -} - -func newAESCTR(key, iv []byte) (cipher.Stream, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - return cipher.NewCTR(c, iv), nil -} - -func newRC4(key, iv []byte) (cipher.Stream, error) { - return rc4.NewCipher(key) -} - -type cipherMode struct { - keySize int - ivSize int - create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) -} - -func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - stream, err := createFunc(key, iv) - if err != nil { - return nil, err - } - - var streamDump []byte - if skip > 0 { - streamDump = make([]byte, 512) - } - - for remainingToDump := skip; remainingToDump > 0; { - dumpThisTime := remainingToDump - if dumpThisTime > len(streamDump) { - dumpThisTime = len(streamDump) - } - stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) - remainingToDump -= dumpThisTime - } - - mac := macModes[algs.MAC].new(macKey) - return &streamPacketCipher{ - mac: mac, - etm: macModes[algs.MAC].etm, - macResult: make([]byte, mac.Size()), - cipher: stream, - }, nil - } -} - -// cipherModes documents properties of supported ciphers. Ciphers not included -// are not supported and will not be negotiated, even if explicitly requested in -// ClientConfig.Crypto.Ciphers. -var cipherModes = map[string]*cipherMode{ - // Ciphers from RFC 4344, which introduced many CTR-based ciphers. Algorithms - // are defined in the order specified in the RFC. 
- "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - - // Ciphers from RFC 4345, which introduces security-improved arcfour ciphers. - // They are defined in the order specified in the RFC. - "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, - "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, - - // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. - // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and - // RC4) has problems with weak keys, and should be used with caution." - // RFC 4345 introduces improved versions of Arcfour. - "arcfour": {16, 0, streamCipherMode(0, newRC4)}, - - // AEAD ciphers - gcm128CipherID: {16, 12, newGCMCipher}, - gcm256CipherID: {32, 12, newGCMCipher}, - chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, - - // CBC mode is insecure and so is not included in the default config. - // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely - // needed, it's possible to specify a custom Config to enable it. - // You should expect that an active attacker can recover plaintext if - // you do. - aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, - - // 3des-cbc is insecure and is not included in the default - // config. - tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, -} - -// prefixLen is the length of the packet prefix that contains the packet length -// and number of padding bytes. -const prefixLen = 5 - -// streamPacketCipher is a packetCipher using a stream cipher. -type streamPacketCipher struct { - mac hash.Hash - cipher cipher.Stream - etm bool - - // The following members are to avoid per-packet allocations. 
- prefix [prefixLen]byte - seqNumBytes [4]byte - padding [2 * packetSizeMultiple]byte - packetData []byte - macResult []byte -} - -// readCipherPacket reads and decrypt a single packet from the reader argument. -func (s *streamPacketCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, s.prefix[:]); err != nil { - return nil, err - } - - var encryptedPaddingLength [1]byte - if s.mac != nil && s.etm { - copy(encryptedPaddingLength[:], s.prefix[4:5]) - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } else { - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - length := binary.BigEndian.Uint32(s.prefix[0:4]) - paddingLength := uint32(s.prefix[4]) - - var macSize uint32 - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - if s.etm { - s.mac.Write(s.prefix[:4]) - s.mac.Write(encryptedPaddingLength[:]) - } else { - s.mac.Write(s.prefix[:]) - } - macSize = uint32(s.mac.Size()) - } - - if length <= paddingLength+1 { - return nil, errors.New("ssh: invalid packet length, packet too small") - } - - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - // the maxPacket check above ensures that length-1+macSize - // does not overflow. 
- if uint32(cap(s.packetData)) < length-1+macSize { - s.packetData = make([]byte, length-1+macSize) - } else { - s.packetData = s.packetData[:length-1+macSize] - } - - if _, err := io.ReadFull(r, s.packetData); err != nil { - return nil, err - } - mac := s.packetData[length-1:] - data := s.packetData[:length-1] - - if s.mac != nil && s.etm { - s.mac.Write(data) - } - - s.cipher.XORKeyStream(data, data) - - if s.mac != nil { - if !s.etm { - s.mac.Write(data) - } - s.macResult = s.mac.Sum(s.macResult[:0]) - if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { - return nil, errors.New("ssh: MAC failure") - } - } - - return s.packetData[:length-paddingLength-1], nil -} - -// writeCipherPacket encrypts and sends a packet of data to the writer argument -func (s *streamPacketCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - if len(packet) > maxPacket { - return errors.New("ssh: packet too large") - } - - aadlen := 0 - if s.mac != nil && s.etm { - // packet length is not encrypted for EtM modes - aadlen = 4 - } - - paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple - if paddingLength < 4 { - paddingLength += packetSizeMultiple - } - - length := len(packet) + 1 + paddingLength - binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) - s.prefix[4] = byte(paddingLength) - padding := s.padding[:paddingLength] - if _, err := io.ReadFull(rand, padding); err != nil { - return err - } - - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - - if s.etm { - // For EtM algorithms, the packet length must stay unencrypted, - // but the following data (padding length) must be encrypted - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } - - s.mac.Write(s.prefix[:]) - - if !s.etm { - // For non-EtM algorithms, the algorithm is applied on unencrypted data - s.mac.Write(packet) - s.mac.Write(padding) - } - } - - if !(s.mac != nil 
&& s.etm) { - // For EtM algorithms, the padding length has already been encrypted - // and the packet length must remain unencrypted - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - s.cipher.XORKeyStream(packet, packet) - s.cipher.XORKeyStream(padding, padding) - - if s.mac != nil && s.etm { - // For EtM algorithms, packet and padding must be encrypted - s.mac.Write(packet) - s.mac.Write(padding) - } - - if _, err := w.Write(s.prefix[:]); err != nil { - return err - } - if _, err := w.Write(packet); err != nil { - return err - } - if _, err := w.Write(padding); err != nil { - return err - } - - if s.mac != nil { - s.macResult = s.mac.Sum(s.macResult[:0]) - if _, err := w.Write(s.macResult); err != nil { - return err - } - } - - return nil -} - -type gcmCipher struct { - aead cipher.AEAD - prefix [4]byte - iv []byte - buf []byte -} - -func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - aead, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - - return &gcmCipher{ - aead: aead, - iv: iv, - }, nil -} - -const gcmTagSize = 16 - -func (c *gcmCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - // Pad out to multiple of 16 bytes. This is different from the - // stream cipher because that encrypts the length too. 
- padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) - if padding < 4 { - padding += packetSizeMultiple - } - - length := uint32(len(packet) + int(padding) + 1) - binary.BigEndian.PutUint32(c.prefix[:], length) - if _, err := w.Write(c.prefix[:]); err != nil { - return err - } - - if cap(c.buf) < int(length) { - c.buf = make([]byte, length) - } else { - c.buf = c.buf[:length] - } - - c.buf[0] = padding - copy(c.buf[1:], packet) - if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { - return err - } - c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if _, err := w.Write(c.buf); err != nil { - return err - } - c.incIV() - - return nil -} - -func (c *gcmCipher) incIV() { - for i := 4 + 7; i >= 4; i-- { - c.iv[i]++ - if c.iv[i] != 0 { - break - } - } -} - -func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, c.prefix[:]); err != nil { - return nil, err - } - length := binary.BigEndian.Uint32(c.prefix[:]) - if length > maxPacket { - return nil, errors.New("ssh: max packet length exceeded") - } - - if cap(c.buf) < int(length+gcmTagSize) { - c.buf = make([]byte, length+gcmTagSize) - } else { - c.buf = c.buf[:length+gcmTagSize] - } - - if _, err := io.ReadFull(r, c.buf); err != nil { - return nil, err - } - - plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if err != nil { - return nil, err - } - c.incIV() - - if len(plain) == 0 { - return nil, errors.New("ssh: empty packet") - } - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. 
- return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding+1) >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - plain = plain[1 : length-uint32(padding)] - return plain, nil -} - -// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 -type cbcCipher struct { - mac hash.Hash - macSize uint32 - decrypter cipher.BlockMode - encrypter cipher.BlockMode - - // The following members are to avoid per-packet allocations. - seqNumBytes [4]byte - packetData []byte - macResult []byte - - // Amount of data we should still read to hide which - // verification error triggered. - oracleCamouflage uint32 -} - -func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - cbc := &cbcCipher{ - mac: macModes[algs.MAC].new(macKey), - decrypter: cipher.NewCBCDecrypter(c, iv), - encrypter: cipher.NewCBCEncrypter(c, iv), - packetData: make([]byte, 1024), - } - if cbc.mac != nil { - cbc.macSize = uint32(cbc.mac.Size()) - } - - return cbc, nil -} - -func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := des.NewTripleDESCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func maxUInt32(a, b int) uint32 { - if a > b { - return uint32(a) - } - return uint32(b) -} - -const ( - cbcMinPacketSizeMultiple = 8 - cbcMinPacketSize = 16 - cbcMinPaddingSize = 4 -) - -// cbcError represents a verification error that may leak information. 
-type cbcError string - -func (e cbcError) Error() string { return string(e) } - -func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - p, err := c.readCipherPacketLeaky(seqNum, r) - if err != nil { - if _, ok := err.(cbcError); ok { - // Verification error: read a fixed amount of - // data, to make distinguishing between - // failing MAC and failing length check more - // difficult. - io.CopyN(io.Discard, r, int64(c.oracleCamouflage)) - } - } - return p, err -} - -func (c *cbcCipher) readCipherPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { - blockSize := c.decrypter.BlockSize() - - // Read the header, which will include some of the subsequent data in the - // case of block ciphers - this is copied back to the payload later. - // How many bytes of payload/padding will be read with this first read. - firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) - firstBlock := c.packetData[:firstBlockLength] - if _, err := io.ReadFull(r, firstBlock); err != nil { - return nil, err - } - - c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength - - c.decrypter.CryptBlocks(firstBlock, firstBlock) - length := binary.BigEndian.Uint32(firstBlock[:4]) - if length > maxPacket { - return nil, cbcError("ssh: packet too large") - } - if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { - // The minimum size of a packet is 16 (or the cipher block size, whichever - // is larger) bytes. - return nil, cbcError("ssh: packet too small") - } - // The length of the packet (including the length field but not the MAC) must - // be a multiple of the block size or 8, whichever is larger. 
- if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { - return nil, cbcError("ssh: invalid packet length multiple") - } - - paddingLength := uint32(firstBlock[4]) - if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { - return nil, cbcError("ssh: invalid packet length") - } - - // Positions within the c.packetData buffer: - macStart := 4 + length - paddingStart := macStart - paddingLength - - // Entire packet size, starting before length, ending at end of mac. - entirePacketSize := macStart + c.macSize - - // Ensure c.packetData is large enough for the entire packet data. - if uint32(cap(c.packetData)) < entirePacketSize { - // Still need to upsize and copy, but this should be rare at runtime, only - // on upsizing the packetData buffer. - c.packetData = make([]byte, entirePacketSize) - copy(c.packetData, firstBlock) - } else { - c.packetData = c.packetData[:entirePacketSize] - } - - n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) - if err != nil { - return nil, err - } - c.oracleCamouflage -= uint32(n) - - remainingCrypted := c.packetData[firstBlockLength:macStart] - c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) - - mac := c.packetData[macStart:] - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData[:macStart]) - c.macResult = c.mac.Sum(c.macResult[:0]) - if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { - return nil, cbcError("ssh: MAC failure") - } - } - - return c.packetData[prefixLen:paddingStart], nil -} - -func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) - - // Length of encrypted portion of the packet (header, payload, padding). - // Enforce minimum padding and packet size. 
- encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) - // Enforce block size. - encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize - - length := encLength - 4 - paddingLength := int(length) - (1 + len(packet)) - - // Overall buffer contains: header, payload, padding, mac. - // Space for the MAC is reserved in the capacity but not the slice length. - bufferSize := encLength + c.macSize - if uint32(cap(c.packetData)) < bufferSize { - c.packetData = make([]byte, encLength, bufferSize) - } else { - c.packetData = c.packetData[:encLength] - } - - p := c.packetData - - // Packet header. - binary.BigEndian.PutUint32(p, length) - p = p[4:] - p[0] = byte(paddingLength) - - // Payload. - p = p[1:] - copy(p, packet) - - // Padding. - p = p[len(packet):] - if _, err := io.ReadFull(rand, p); err != nil { - return err - } - - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData) - // The MAC is now appended into the capacity reserved for it earlier. - c.packetData = c.mac.Sum(c.packetData) - } - - c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) - - if _, err := w.Write(c.packetData); err != nil { - return err - } - - return nil -} - -const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" - -// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com -// AEAD, which is described here: -// -// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 -// -// the methods here also implement padding, which RFC 4253 Section 6 -// also requires of stream ciphers. 
-type chacha20Poly1305Cipher struct { - lengthKey [32]byte - contentKey [32]byte - buf []byte -} - -func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - if len(key) != 64 { - panic(len(key)) - } - - c := &chacha20Poly1305Cipher{ - buf: make([]byte, 256), - } - - copy(c.contentKey[:], key[:32]) - copy(c.lengthKey[:], key[32:]) - return c, nil -} - -func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - nonce := make([]byte, 12) - binary.BigEndian.PutUint32(nonce[8:], seqNum) - s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) - if err != nil { - return nil, err - } - var polyKey, discardBuf [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes - - encryptedLength := c.buf[:4] - if _, err := io.ReadFull(r, encryptedLength); err != nil { - return nil, err - } - - var lenBytes [4]byte - ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) - if err != nil { - return nil, err - } - ls.XORKeyStream(lenBytes[:], encryptedLength) - - length := binary.BigEndian.Uint32(lenBytes[:]) - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - contentEnd := 4 + length - packetEnd := contentEnd + poly1305.TagSize - if uint32(cap(c.buf)) < packetEnd { - c.buf = make([]byte, packetEnd) - copy(c.buf[:], encryptedLength) - } else { - c.buf = c.buf[:packetEnd] - } - - if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { - return nil, err - } - - var mac [poly1305.TagSize]byte - copy(mac[:], c.buf[contentEnd:packetEnd]) - if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { - return nil, errors.New("ssh: MAC failure") - } - - plain := c.buf[4:contentEnd] - s.XORKeyStream(plain, plain) - - if len(plain) == 0 { - return nil, errors.New("ssh: empty packet") - } - - padding := plain[0] - if padding < 4 { - // padding is a 
byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding)+1 >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - - plain = plain[1 : len(plain)-int(padding)] - - return plain, nil -} - -func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { - nonce := make([]byte, 12) - binary.BigEndian.PutUint32(nonce[8:], seqNum) - s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) - if err != nil { - return err - } - var polyKey, discardBuf [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes - - // There is no blocksize, so fall back to multiple of 8 byte - // padding, as described in RFC 4253, Sec 6. - const packetSizeMultiple = 8 - - padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple - if padding < 4 { - padding += packetSizeMultiple - } - - // size (4 bytes), padding (1), payload, padding, tag. 
- totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize - if cap(c.buf) < totalLength { - c.buf = make([]byte, totalLength) - } else { - c.buf = c.buf[:totalLength] - } - - binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) - ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) - if err != nil { - return err - } - ls.XORKeyStream(c.buf, c.buf[:4]) - c.buf[4] = byte(padding) - copy(c.buf[5:], payload) - packetEnd := 5 + len(payload) + padding - if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { - return err - } - - s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) - - var mac [poly1305.TagSize]byte - poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) - - copy(c.buf[packetEnd:], mac[:]) - - if _, err := w.Write(c.buf); err != nil { - return err - } - return nil -} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go deleted file mode 100644 index bdc356cb..00000000 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "net" - "os" - "sync" - "time" -) - -// Client implements a traditional SSH client that supports shells, -// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. -type Client struct { - Conn - - handleForwardsOnce sync.Once // guards calling (*Client).handleForwards - - forwards forwardList // forwarded tcpip connections from the remote side - mu sync.Mutex - channelHandlers map[string]chan NewChannel -} - -// HandleChannelOpen returns a channel on which NewChannel requests -// for the given type are sent. If the type already is being handled, -// nil is returned. The channel is closed when the connection is closed. 
-func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { - c.mu.Lock() - defer c.mu.Unlock() - if c.channelHandlers == nil { - // The SSH channel has been closed. - c := make(chan NewChannel) - close(c) - return c - } - - ch := c.channelHandlers[channelType] - if ch != nil { - return nil - } - - ch = make(chan NewChannel, chanSize) - c.channelHandlers[channelType] = ch - return ch -} - -// NewClient creates a Client on top of the given connection. -func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { - conn := &Client{ - Conn: c, - channelHandlers: make(map[string]chan NewChannel, 1), - } - - go conn.handleGlobalRequests(reqs) - go conn.handleChannelOpens(chans) - go func() { - conn.Wait() - conn.forwards.closeAll() - }() - return conn -} - -// NewClientConn establishes an authenticated SSH connection using c -// as the underlying transport. The Request and NewChannel channels -// must be serviced or the connection will hang. -func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.HostKeyCallback == nil { - c.Close() - return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") - } - - conn := &connection{ - sshConn: sshConn{conn: c, user: fullConf.User}, - } - - if err := conn.clientHandshake(addr, &fullConf); err != nil { - c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) - } - conn.mux = newMux(conn.transport) - return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil -} - -// clientHandshake performs the client side key exchange. See RFC 4253 Section -// 7. 
-func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { - if config.ClientVersion != "" { - c.clientVersion = []byte(config.ClientVersion) - } else { - c.clientVersion = []byte(packageVersion) - } - var err error - c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) - if err != nil { - return err - } - - c.transport = newClientTransport( - newTransport(c.sshConn.conn, config.Rand, true /* is client */), - c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) - if err := c.transport.waitSession(); err != nil { - return err - } - - c.sessionID = c.transport.getSessionID() - return c.clientAuthenticate(config) -} - -// verifyHostKeySignature verifies the host key obtained in the key exchange. -// algo is the negotiated algorithm, and may be a certificate type. -func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error { - sig, rest, ok := parseSignatureBody(result.Signature) - if len(rest) > 0 || !ok { - return errors.New("ssh: signature parse error") - } - - if a := underlyingAlgo(algo); sig.Format != a { - return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, a) - } - - return hostKey.Verify(result.H, sig) -} - -// NewSession opens a new Session for this client. (A session is a remote -// execution of a program.) -func (c *Client) NewSession() (*Session, error) { - ch, in, err := c.OpenChannel("session", nil) - if err != nil { - return nil, err - } - return newSession(ch, in) -} - -func (c *Client) handleGlobalRequests(incoming <-chan *Request) { - for r := range incoming { - // This handles keepalive messages and matches - // the behaviour of OpenSSH. - r.Reply(false, nil) - } -} - -// handleChannelOpens channel open messages from the remote side. 
-func (c *Client) handleChannelOpens(in <-chan NewChannel) { - for ch := range in { - c.mu.Lock() - handler := c.channelHandlers[ch.ChannelType()] - c.mu.Unlock() - - if handler != nil { - handler <- ch - } else { - ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) - } - } - - c.mu.Lock() - for _, ch := range c.channelHandlers { - close(ch) - } - c.channelHandlers = nil - c.mu.Unlock() -} - -// Dial starts a client connection to the given SSH server. It is a -// convenience function that connects to the given network address, -// initiates the SSH handshake, and then sets up a Client. For access -// to incoming channels and requests, use net.Dial with NewClientConn -// instead. -func Dial(network, addr string, config *ClientConfig) (*Client, error) { - conn, err := net.DialTimeout(network, addr, config.Timeout) - if err != nil { - return nil, err - } - c, chans, reqs, err := NewClientConn(conn, addr, config) - if err != nil { - return nil, err - } - return NewClient(c, chans, reqs), nil -} - -// HostKeyCallback is the function type used for verifying server -// keys. A HostKeyCallback must return nil if the host key is OK, or -// an error to reject it. It receives the hostname as passed to Dial -// or NewClientConn. The remote address is the RemoteAddr of the -// net.Conn underlying the SSH connection. -type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error - -// BannerCallback is the function type used for treat the banner sent by -// the server. A BannerCallback receives the message sent by the remote server. -type BannerCallback func(message string) error - -// A ClientConfig structure is used to configure a Client. It must not be -// modified after having been passed to an SSH function. -type ClientConfig struct { - // Config contains configuration that is shared between clients and - // servers. - Config - - // User contains the username to authenticate as. 
- User string - - // Auth contains possible authentication methods to use with the - // server. Only the first instance of a particular RFC 4252 method will - // be used during authentication. - Auth []AuthMethod - - // HostKeyCallback is called during the cryptographic - // handshake to validate the server's host key. The client - // configuration must supply this callback for the connection - // to succeed. The functions InsecureIgnoreHostKey or - // FixedHostKey can be used for simplistic host key checks. - HostKeyCallback HostKeyCallback - - // BannerCallback is called during the SSH dance to display a custom - // server's message. The client configuration can supply this callback to - // handle it as wished. The function BannerDisplayStderr can be used for - // simplistic display on Stderr. - BannerCallback BannerCallback - - // ClientVersion contains the version identification string that will - // be used for the connection. If empty, a reasonable default is used. - ClientVersion string - - // HostKeyAlgorithms lists the public key algorithms that the client will - // accept from the server for host key authentication, in order of - // preference. If empty, a reasonable default is used. Any - // string returned from a PublicKey.Type method may be used, or - // any of the CertAlgo and KeyAlgo constants. - HostKeyAlgorithms []string - - // Timeout is the maximum amount of time for the TCP connection to establish. - // - // A Timeout of zero means no timeout. - Timeout time.Duration -} - -// InsecureIgnoreHostKey returns a function that can be used for -// ClientConfig.HostKeyCallback to accept any host key. It should -// not be used for production code. 
-func InsecureIgnoreHostKey() HostKeyCallback { - return func(hostname string, remote net.Addr, key PublicKey) error { - return nil - } -} - -type fixedHostKey struct { - key PublicKey -} - -func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { - if f.key == nil { - return fmt.Errorf("ssh: required host key was nil") - } - if !bytes.Equal(key.Marshal(), f.key.Marshal()) { - return fmt.Errorf("ssh: host key mismatch") - } - return nil -} - -// FixedHostKey returns a function for use in -// ClientConfig.HostKeyCallback to accept only a specific host key. -func FixedHostKey(key PublicKey) HostKeyCallback { - hk := &fixedHostKey{key} - return hk.check -} - -// BannerDisplayStderr returns a function that can be used for -// ClientConfig.BannerCallback to display banners on os.Stderr. -func BannerDisplayStderr() BannerCallback { - return func(banner string) error { - _, err := os.Stderr.WriteString(banner) - - return err - } -} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go deleted file mode 100644 index 409b5ea1..00000000 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ /dev/null @@ -1,725 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "strings" -) - -type authResult int - -const ( - authFailure authResult = iota - authPartialSuccess - authSuccess -) - -// clientAuthenticate authenticates with the remote server. See RFC 4252. 
-func (c *connection) clientAuthenticate(config *ClientConfig) error { - // initiate user auth session - if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { - return err - } - packet, err := c.transport.readPacket() - if err != nil { - return err - } - // The server may choose to send a SSH_MSG_EXT_INFO at this point (if we - // advertised willingness to receive one, which we always do) or not. See - // RFC 8308, Section 2.4. - extensions := make(map[string][]byte) - if len(packet) > 0 && packet[0] == msgExtInfo { - var extInfo extInfoMsg - if err := Unmarshal(packet, &extInfo); err != nil { - return err - } - payload := extInfo.Payload - for i := uint32(0); i < extInfo.NumExtensions; i++ { - name, rest, ok := parseString(payload) - if !ok { - return parseError(msgExtInfo) - } - value, rest, ok := parseString(rest) - if !ok { - return parseError(msgExtInfo) - } - extensions[string(name)] = value - payload = rest - } - packet, err = c.transport.readPacket() - if err != nil { - return err - } - } - var serviceAccept serviceAcceptMsg - if err := Unmarshal(packet, &serviceAccept); err != nil { - return err - } - - // during the authentication phase the client first attempts the "none" method - // then any untried methods suggested by the server. 
- var tried []string - var lastMethods []string - - sessionID := c.transport.getSessionID() - for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) - if err != nil { - return err - } - if ok == authSuccess { - // success - return nil - } else if ok == authFailure { - if m := auth.method(); !contains(tried, m) { - tried = append(tried, m) - } - } - if methods == nil { - methods = lastMethods - } - lastMethods = methods - - auth = nil - - findNext: - for _, a := range config.Auth { - candidateMethod := a.method() - if contains(tried, candidateMethod) { - continue - } - for _, meth := range methods { - if meth == candidateMethod { - auth = a - break findNext - } - } - } - } - return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) -} - -func contains(list []string, e string) bool { - for _, s := range list { - if s == e { - return true - } - } - return false -} - -// An AuthMethod represents an instance of an RFC 4252 authentication method. -type AuthMethod interface { - // auth authenticates user over transport t. - // Returns true if authentication is successful. - // If authentication is not successful, a []string of alternative - // method names is returned. If the slice is nil, it will be ignored - // and the previous set of possible methods will be reused. - auth(session []byte, user string, p packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) - - // method returns the RFC 4252 method name. - method() string -} - -// "none" authentication, RFC 4252 section 5.2. 
-type noneAuth int - -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { - if err := c.writePacket(Marshal(&userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: "none", - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (n *noneAuth) method() string { - return "none" -} - -// passwordCallback is an AuthMethod that fetches the password through -// a function call, e.g. by prompting the user. -type passwordCallback func() (password string, err error) - -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { - type passwordAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - Reply bool - Password string - } - - pw, err := cb() - // REVIEW NOTE: is there a need to support skipping a password attempt? - // The program may only find out that the user doesn't have a password - // when prompting. - if err != nil { - return authFailure, nil, err - } - - if err := c.writePacket(Marshal(&passwordAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - Reply: false, - Password: pw, - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (cb passwordCallback) method() string { - return "password" -} - -// Password returns an AuthMethod using the given password. -func Password(secret string) AuthMethod { - return passwordCallback(func() (string, error) { return secret, nil }) -} - -// PasswordCallback returns an AuthMethod that uses a callback for -// fetching a password. 
-func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { - return passwordCallback(prompt) -} - -type publickeyAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - // HasSig indicates to the receiver packet that the auth request is signed and - // should be used for authentication of the request. - HasSig bool - Algoname string - PubKey []byte - // Sig is tagged with "rest" so Marshal will exclude it during - // validateKey - Sig []byte `ssh:"rest"` -} - -// publicKeyCallback is an AuthMethod that uses a set of key -// pairs for authentication. -type publicKeyCallback func() ([]Signer, error) - -func (cb publicKeyCallback) method() string { - return "publickey" -} - -func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) { - keyFormat := signer.PublicKey().Type() - - // Like in sendKexInit, if the public key implements AlgorithmSigner we - // assume it supports all algorithms, otherwise only the key format one. - as, ok := signer.(AlgorithmSigner) - if !ok { - return algorithmSignerWrapper{signer}, keyFormat - } - - extPayload, ok := extensions["server-sig-algs"] - if !ok { - // If there is no "server-sig-algs" extension, fall back to the key - // format algorithm. - return as, keyFormat - } - - // The server-sig-algs extension only carries underlying signature - // algorithm, but we are trying to select a protocol-level public key - // algorithm, which might be a certificate type. Extend the list of server - // supported algorithms to include the corresponding certificate algorithms. 
- serverAlgos := strings.Split(string(extPayload), ",") - for _, algo := range serverAlgos { - if certAlgo, ok := certificateAlgo(algo); ok { - serverAlgos = append(serverAlgos, certAlgo) - } - } - - keyAlgos := algorithmsForKeyFormat(keyFormat) - algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) - if err != nil { - // If there is no overlap, try the key anyway with the key format - // algorithm, to support servers that fail to list all supported - // algorithms. - return as, keyFormat - } - return as, algo -} - -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) { - // Authentication is performed by sending an enquiry to test if a key is - // acceptable to the remote. If the key is acceptable, the client will - // attempt to authenticate with the valid key. If not the client will repeat - // the process with the remaining keys. - - signers, err := cb() - if err != nil { - return authFailure, nil, err - } - var methods []string - for _, signer := range signers { - pub := signer.PublicKey() - as, algo := pickSignatureAlgorithm(signer, extensions) - - ok, err := validateKey(pub, algo, user, c) - if err != nil { - return authFailure, nil, err - } - if !ok { - continue - } - - pubKey := pub.Marshal() - data := buildDataSignedForAuth(session, userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - }, algo, pubKey) - sign, err := as.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) - if err != nil { - return authFailure, nil, err - } - - // manually wrap the serialized signature in a string - s := Marshal(sign) - sig := make([]byte, stringLength(len(s))) - marshalString(sig, s) - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - HasSig: true, - Algoname: algo, - PubKey: pubKey, - Sig: sig, - } - p := Marshal(&msg) - if err := c.writePacket(p); err != nil { - return authFailure, 
nil, err - } - var success authResult - success, methods, err = handleAuthResponse(c) - if err != nil { - return authFailure, nil, err - } - - // If authentication succeeds or the list of available methods does not - // contain the "publickey" method, do not attempt to authenticate with any - // other keys. According to RFC 4252 Section 7, the latter can occur when - // additional authentication methods are required. - if success == authSuccess || !containsMethod(methods, cb.method()) { - return success, methods, err - } - } - - return authFailure, methods, nil -} - -func containsMethod(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - - return false -} - -// validateKey validates the key provided is acceptable to the server. -func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, error) { - pubKey := key.Marshal() - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: "publickey", - HasSig: false, - Algoname: algo, - PubKey: pubKey, - } - if err := c.writePacket(Marshal(&msg)); err != nil { - return false, err - } - - return confirmKeyAck(key, algo, c) -} - -func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) { - pubKey := key.Marshal() - - for { - packet, err := c.readPacket() - if err != nil { - return false, err - } - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return false, err - } - case msgUserAuthPubKeyOk: - var msg userAuthPubKeyOkMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, err - } - if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) { - return false, nil - } - return true, nil - case msgUserAuthFailure: - return false, nil - default: - return false, unexpectedMessageError(msgUserAuthPubKeyOk, packet[0]) - } - } -} - -// PublicKeys returns an AuthMethod that uses the given key -// pairs. 
-func PublicKeys(signers ...Signer) AuthMethod { - return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) -} - -// PublicKeysCallback returns an AuthMethod that runs the given -// function to obtain a list of key pairs. -func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { - return publicKeyCallback(getSigners) -} - -// handleAuthResponse returns whether the preceding authentication request succeeded -// along with a list of remaining authentication methods to try next and -// an error if an unexpected response was received. -func handleAuthResponse(c packetConn) (authResult, []string, error) { - gotMsgExtInfo := false - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - case msgExtInfo: - // Ignore post-authentication RFC 8308 extensions, once. - if gotMsgExtInfo { - return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - gotMsgExtInfo = true - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -func handleBannerResponse(c packetConn, packet []byte) error { - var msg userAuthBannerMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - transport, ok := c.(*handshakeTransport) - if !ok { - return nil - } - - if transport.bannerCallback != nil { - return transport.bannerCallback(msg.Message) - } - - return nil -} - -// KeyboardInteractiveChallenge should print questions, optionally -// disabling echoing (e.g. 
for passwords), and return all the answers. -// Challenge may be called multiple times in a single session. After -// successful authentication, the server may send a challenge with no -// questions, for which the name and instruction messages should be -// printed. RFC 4256 section 3.3 details how the UI should behave for -// both CLI and GUI environments. -type KeyboardInteractiveChallenge func(name, instruction string, questions []string, echos []bool) (answers []string, err error) - -// KeyboardInteractive returns an AuthMethod using a prompt/response -// sequence controlled by the server. -func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { - return challenge -} - -func (cb KeyboardInteractiveChallenge) method() string { - return "keyboard-interactive" -} - -func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { - type initiateMsg struct { - User string `sshtype:"50"` - Service string - Method string - Language string - Submethods string - } - - if err := c.writePacket(Marshal(&initiateMsg{ - User: user, - Service: serviceSSH, - Method: "keyboard-interactive", - })); err != nil { - return authFailure, nil, err - } - - gotMsgExtInfo := false - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - // like handleAuthResponse, but with less options. - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - continue - case msgExtInfo: - // Ignore post-authentication RFC 8308 extensions, once. 
- if gotMsgExtInfo { - return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) - } - gotMsgExtInfo = true - continue - case msgUserAuthInfoRequest: - // OK - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) - } - - var msg userAuthInfoRequestMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - - // Manually unpack the prompt/echo pairs. - rest := msg.Prompts - var prompts []string - var echos []bool - for i := 0; i < int(msg.NumPrompts); i++ { - prompt, r, ok := parseString(rest) - if !ok || len(r) == 0 { - return authFailure, nil, errors.New("ssh: prompt format error") - } - prompts = append(prompts, string(prompt)) - echos = append(echos, r[0] != 0) - rest = r[1:] - } - - if len(rest) != 0 { - return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") - } - - answers, err := cb(msg.Name, msg.Instruction, prompts, echos) - if err != nil { - return authFailure, nil, err - } - - if len(answers) != len(prompts) { - return authFailure, nil, fmt.Errorf("ssh: incorrect number of answers from keyboard-interactive callback %d (expected %d)", len(answers), len(prompts)) - } - responseLength := 1 + 4 - for _, a := range answers { - responseLength += stringLength(len(a)) - } - serialized := make([]byte, responseLength) - p := serialized - p[0] = msgUserAuthInfoResponse - p = p[1:] - p = marshalUint32(p, uint32(len(answers))) - for _, a := range answers { - p = marshalString(p, []byte(a)) - } - - if err := c.writePacket(serialized); err != nil { - return authFailure, nil, err - } - } -} - -type retryableAuthMethod struct { - 
authMethod AuthMethod - maxTries int -} - -func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (ok authResult, methods []string, err error) { - for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { - ok, methods, err = r.authMethod.auth(session, user, c, rand, extensions) - if ok != authFailure || err != nil { // either success, partial success or error terminate - return ok, methods, err - } - } - return ok, methods, err -} - -func (r *retryableAuthMethod) method() string { - return r.authMethod.method() -} - -// RetryableAuthMethod is a decorator for other auth methods enabling them to -// be retried up to maxTries before considering that AuthMethod itself failed. -// If maxTries is <= 0, will retry indefinitely -// -// This is useful for interactive clients using challenge/response type -// authentication (e.g. Keyboard-Interactive, Password, etc) where the user -// could mistype their response resulting in the server issuing a -// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 -// [keyboard-interactive]); Without this decorator, the non-retryable -// AuthMethod would be removed from future consideration, and never tried again -// (and so the user would never be able to retry their entry). -func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { - return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} -} - -// GSSAPIWithMICAuthMethod is an AuthMethod with "gssapi-with-mic" authentication. -// See RFC 4462 section 3 -// gssAPIClient is implementation of the GSSAPIClient interface, see the definition of the interface for details. -// target is the server host you want to log in to. 
-func GSSAPIWithMICAuthMethod(gssAPIClient GSSAPIClient, target string) AuthMethod { - if gssAPIClient == nil { - panic("gss-api client must be not nil with enable gssapi-with-mic") - } - return &gssAPIWithMICCallback{gssAPIClient: gssAPIClient, target: target} -} - -type gssAPIWithMICCallback struct { - gssAPIClient GSSAPIClient - target string -} - -func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { - m := &userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: g.method(), - } - // The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST. - // See RFC 4462 section 3.2. - m.Payload = appendU32(m.Payload, 1) - m.Payload = appendString(m.Payload, string(krb5OID)) - if err := c.writePacket(Marshal(m)); err != nil { - return authFailure, nil, err - } - // The server responds to the SSH_MSG_USERAUTH_REQUEST with either an - // SSH_MSG_USERAUTH_FAILURE if none of the mechanisms are supported or - // with an SSH_MSG_USERAUTH_GSSAPI_RESPONSE. - // See RFC 4462 section 3.3. - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication,so I don't want to check - // selected mech if it is valid. - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - userAuthGSSAPIResp := &userAuthGSSAPIResponse{} - if err := Unmarshal(packet, userAuthGSSAPIResp); err != nil { - return authFailure, nil, err - } - // Start the loop into the exchange token. - // See RFC 4462 section 3.4. - var token []byte - defer g.gssAPIClient.DeleteSecContext() - for { - // Initiates the establishment of a security context between the application and a remote peer. 
- nextToken, needContinue, err := g.gssAPIClient.InitSecContext("host@"+g.target, token, false) - if err != nil { - return authFailure, nil, err - } - if len(nextToken) > 0 { - if err := c.writePacket(Marshal(&userAuthGSSAPIToken{ - Token: nextToken, - })); err != nil { - return authFailure, nil, err - } - } - if !needContinue { - break - } - packet, err = c.readPacket() - if err != nil { - return authFailure, nil, err - } - switch packet[0] { - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthGSSAPIError: - userAuthGSSAPIErrorResp := &userAuthGSSAPIError{} - if err := Unmarshal(packet, userAuthGSSAPIErrorResp); err != nil { - return authFailure, nil, err - } - return authFailure, nil, fmt.Errorf("GSS-API Error:\n"+ - "Major Status: %d\n"+ - "Minor Status: %d\n"+ - "Error Message: %s\n", userAuthGSSAPIErrorResp.MajorStatus, userAuthGSSAPIErrorResp.MinorStatus, - userAuthGSSAPIErrorResp.Message) - case msgUserAuthGSSAPIToken: - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return authFailure, nil, err - } - token = userAuthGSSAPITokenReq.Token - } - } - // Binding Encryption Keys. - // See RFC 4462 section 3.5. 
- micField := buildMIC(string(session), user, "ssh-connection", "gssapi-with-mic") - micToken, err := g.gssAPIClient.GetMIC(micField) - if err != nil { - return authFailure, nil, err - } - if err := c.writePacket(Marshal(&userAuthGSSAPIMIC{ - MIC: micToken, - })); err != nil { - return authFailure, nil, err - } - return handleAuthResponse(c) -} - -func (g *gssAPIWithMICCallback) method() string { - return "gssapi-with-mic" -} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go deleted file mode 100644 index b419c761..00000000 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ /dev/null @@ -1,471 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/rand" - "fmt" - "io" - "math" - "strings" - "sync" - - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" -) - -// These are string constants in the SSH protocol. -const ( - compressionNone = "none" - serviceUserAuth = "ssh-userauth" - serviceSSH = "ssh-connection" -) - -// supportedCiphers lists ciphers we support but might not recommend. -var supportedCiphers = []string{ - "aes128-ctr", "aes192-ctr", "aes256-ctr", - "aes128-gcm@openssh.com", gcm256CipherID, - chacha20Poly1305ID, - "arcfour256", "arcfour128", "arcfour", - aes128cbcID, - tripledescbcID, -} - -// preferredCiphers specifies the default preference for ciphers. -var preferredCiphers = []string{ - "aes128-gcm@openssh.com", gcm256CipherID, - chacha20Poly1305ID, - "aes128-ctr", "aes192-ctr", "aes256-ctr", -} - -// supportedKexAlgos specifies the supported key-exchange algorithms in -// preference order. -var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, - // P384 and P521 are not constant-time yet, but since we don't - // reuse ephemeral keys, using them for ECDH should be OK. 
- kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA256, kexAlgoDH16SHA512, kexAlgoDH14SHA1, - kexAlgoDH1SHA1, -} - -// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden -// for the server half. -var serverForbiddenKexAlgos = map[string]struct{}{ - kexAlgoDHGEXSHA1: {}, // server half implementation is only minimal to satisfy the automated tests - kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests -} - -// preferredKexAlgos specifies the default preference for key-exchange -// algorithms in preference order. The diffie-hellman-group16-sha512 algorithm -// is disabled by default because it is a bit slower than the others. -var preferredKexAlgos = []string{ - kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA256, kexAlgoDH14SHA1, -} - -// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods -// of authenticating servers) in preference order. -var supportedHostKeyAlgos = []string{ - CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, - CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, - - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSASHA256, KeyAlgoRSASHA512, - KeyAlgoRSA, KeyAlgoDSA, - - KeyAlgoED25519, -} - -// supportedMACs specifies a default set of MAC algorithms in preference order. -// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed -// because they have reached the end of their useful life. -var supportedMACs = []string{ - "hmac-sha2-256-etm@openssh.com", "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96", -} - -var supportedCompressions = []string{compressionNone} - -// hashFuncs keeps the mapping of supported signature algorithms to their -// respective hashes needed for signing and verification. 
-var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoRSASHA256: crypto.SHA256, - KeyAlgoRSASHA512: crypto.SHA512, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - // KeyAlgoED25519 doesn't pre-hash. - KeyAlgoSKECDSA256: crypto.SHA256, - KeyAlgoSKED25519: crypto.SHA256, -} - -// algorithmsForKeyFormat returns the supported signature algorithms for a given -// public key format (PublicKey.Type), in order of preference. See RFC 8332, -// Section 2. See also the note in sendKexInit on backwards compatibility. -func algorithmsForKeyFormat(keyFormat string) []string { - switch keyFormat { - case KeyAlgoRSA: - return []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA} - case CertAlgoRSAv01: - return []string{CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01} - default: - return []string{keyFormat} - } -} - -// isRSA returns whether algo is a supported RSA algorithm, including certificate -// algorithms. -func isRSA(algo string) bool { - algos := algorithmsForKeyFormat(KeyAlgoRSA) - return contains(algos, underlyingAlgo(algo)) -} - -// supportedPubKeyAuthAlgos specifies the supported client public key -// authentication algorithms. Note that this doesn't include certificate types -// since those use the underlying algorithm. This list is sent to the client if -// it supports the server-sig-algs extension. Order is irrelevant. -var supportedPubKeyAuthAlgos = []string{ - KeyAlgoED25519, - KeyAlgoSKED25519, KeyAlgoSKECDSA256, - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA, - KeyAlgoDSA, -} - -var supportedPubKeyAuthAlgosList = strings.Join(supportedPubKeyAuthAlgos, ",") - -// unexpectedMessageError results when the SSH message that we received didn't -// match what we wanted. 
-func unexpectedMessageError(expected, got uint8) error { - return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) -} - -// parseError results from a malformed SSH message. -func parseError(tag uint8) error { - return fmt.Errorf("ssh: parse error in message type %d", tag) -} - -func findCommon(what string, client []string, server []string) (common string, err error) { - for _, c := range client { - for _, s := range server { - if c == s { - return c, nil - } - } - } - return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) -} - -// directionAlgorithms records algorithm choices in one direction (either read or write) -type directionAlgorithms struct { - Cipher string - MAC string - Compression string -} - -// rekeyBytes returns a rekeying intervals in bytes. -func (a *directionAlgorithms) rekeyBytes() int64 { - // According to RFC 4344 block ciphers should rekey after - // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is - // 128. - switch a.Cipher { - case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcm128CipherID, gcm256CipherID, aes128cbcID: - return 16 * (1 << 32) - - } - - // For others, stick with RFC 4253 recommendation to rekey after 1 Gb of data. 
- return 1 << 30 -} - -var aeadCiphers = map[string]bool{ - gcm128CipherID: true, - gcm256CipherID: true, - chacha20Poly1305ID: true, -} - -type algorithms struct { - kex string - hostKey string - w directionAlgorithms - r directionAlgorithms -} - -func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { - result := &algorithms{} - - result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) - if err != nil { - return - } - - result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) - if err != nil { - return - } - - stoc, ctos := &result.w, &result.r - if isClient { - ctos, stoc = stoc, ctos - } - - ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) - if err != nil { - return - } - - stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) - if err != nil { - return - } - - if !aeadCiphers[ctos.Cipher] { - ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) - if err != nil { - return - } - } - - if !aeadCiphers[stoc.Cipher] { - stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) - if err != nil { - return - } - } - - ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) - if err != nil { - return - } - - stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) - if err != nil { - return - } - - return result, nil -} - -// If rekeythreshold is too small, we can't make any progress sending -// stuff. 
-const minRekeyThreshold uint64 = 256 - -// Config contains configuration data common to both ServerConfig and -// ClientConfig. -type Config struct { - // Rand provides the source of entropy for cryptographic - // primitives. If Rand is nil, the cryptographic random reader - // in package crypto/rand will be used. - Rand io.Reader - - // The maximum number of bytes sent or received after which a - // new key is negotiated. It must be at least 256. If - // unspecified, a size suitable for the chosen cipher is used. - RekeyThreshold uint64 - - // The allowed key exchanges algorithms. If unspecified then a default set - // of algorithms is used. Unsupported values are silently ignored. - KeyExchanges []string - - // The allowed cipher algorithms. If unspecified then a sensible default is - // used. Unsupported values are silently ignored. - Ciphers []string - - // The allowed MAC algorithms. If unspecified then a sensible default is - // used. Unsupported values are silently ignored. - MACs []string -} - -// SetDefaults sets sensible values for unset fields in config. This is -// exported for testing: Configs passed to SSH functions are copied and have -// default values set automatically. -func (c *Config) SetDefaults() { - if c.Rand == nil { - c.Rand = rand.Reader - } - if c.Ciphers == nil { - c.Ciphers = preferredCiphers - } - var ciphers []string - for _, c := range c.Ciphers { - if cipherModes[c] != nil { - // Ignore the cipher if we have no cipherModes definition. - ciphers = append(ciphers, c) - } - } - c.Ciphers = ciphers - - if c.KeyExchanges == nil { - c.KeyExchanges = preferredKexAlgos - } - var kexs []string - for _, k := range c.KeyExchanges { - if kexAlgoMap[k] != nil { - // Ignore the KEX if we have no kexAlgoMap definition. 
- kexs = append(kexs, k) - } - } - c.KeyExchanges = kexs - - if c.MACs == nil { - c.MACs = supportedMACs - } - var macs []string - for _, m := range c.MACs { - if macModes[m] != nil { - // Ignore the MAC if we have no macModes definition. - macs = append(macs, m) - } - } - c.MACs = macs - - if c.RekeyThreshold == 0 { - // cipher specific default - } else if c.RekeyThreshold < minRekeyThreshold { - c.RekeyThreshold = minRekeyThreshold - } else if c.RekeyThreshold >= math.MaxInt64 { - // Avoid weirdness if somebody uses -1 as a threshold. - c.RekeyThreshold = math.MaxInt64 - } -} - -// buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. algo is the advertised -// algorithm, and may be a certificate type. -func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo string, pubKey []byte) []byte { - data := struct { - Session []byte - Type byte - User string - Service string - Method string - Sign bool - Algo string - PubKey []byte - }{ - sessionID, - msgUserAuthRequest, - req.User, - req.Service, - req.Method, - true, - algo, - pubKey, - } - return Marshal(data) -} - -func appendU16(buf []byte, n uint16) []byte { - return append(buf, byte(n>>8), byte(n)) -} - -func appendU32(buf []byte, n uint32) []byte { - return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendU64(buf []byte, n uint64) []byte { - return append(buf, - byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), - byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendInt(buf []byte, n int) []byte { - return appendU32(buf, uint32(n)) -} - -func appendString(buf []byte, s string) []byte { - buf = appendU32(buf, uint32(len(s))) - buf = append(buf, s...) - return buf -} - -func appendBool(buf []byte, b bool) []byte { - if b { - return append(buf, 1) - } - return append(buf, 0) -} - -// newCond is a helper to hide the fact that there is no usable zero -// value for sync.Cond. 
-func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } - -// window represents the buffer available to clients -// wishing to write to a channel. -type window struct { - *sync.Cond - win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 - writeWaiters int - closed bool -} - -// add adds win to the amount of window available -// for consumers. -func (w *window) add(win uint32) bool { - // a zero sized window adjust is a noop. - if win == 0 { - return true - } - w.L.Lock() - if w.win+win < win { - w.L.Unlock() - return false - } - w.win += win - // It is unusual that multiple goroutines would be attempting to reserve - // window space, but not guaranteed. Use broadcast to notify all waiters - // that additional window is available. - w.Broadcast() - w.L.Unlock() - return true -} - -// close sets the window to closed, so all reservations fail -// immediately. -func (w *window) close() { - w.L.Lock() - w.closed = true - w.Broadcast() - w.L.Unlock() -} - -// reserve reserves win from the available window capacity. -// If no capacity remains, reserve will block. reserve may -// return less than requested. -func (w *window) reserve(win uint32) (uint32, error) { - var err error - w.L.Lock() - w.writeWaiters++ - w.Broadcast() - for w.win == 0 && !w.closed { - w.Wait() - } - w.writeWaiters-- - if w.win < win { - win = w.win - } - w.win -= win - if w.closed { - err = io.EOF - } - w.L.Unlock() - return win, err -} - -// waitWriterBlocked waits until some goroutine is blocked for further -// writes. It is used in tests only. -func (w *window) waitWriterBlocked() { - w.Cond.L.Lock() - for w.writeWaiters == 0 { - w.Cond.Wait() - } - w.Cond.L.Unlock() -} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go deleted file mode 100644 index 8f345ee9..00000000 --- a/vendor/golang.org/x/crypto/ssh/connection.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "fmt" - "net" -) - -// OpenChannelError is returned if the other side rejects an -// OpenChannel request. -type OpenChannelError struct { - Reason RejectionReason - Message string -} - -func (e *OpenChannelError) Error() string { - return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) -} - -// ConnMetadata holds metadata for the connection. -type ConnMetadata interface { - // User returns the user ID for this connection. - User() string - - // SessionID returns the session hash, also denoted by H. - SessionID() []byte - - // ClientVersion returns the client's version string as hashed - // into the session ID. - ClientVersion() []byte - - // ServerVersion returns the server's version string as hashed - // into the session ID. - ServerVersion() []byte - - // RemoteAddr returns the remote address for this connection. - RemoteAddr() net.Addr - - // LocalAddr returns the local address for this connection. - LocalAddr() net.Addr -} - -// Conn represents an SSH connection for both server and client roles. -// Conn is the basis for implementing an application layer, such -// as ClientConn, which implements the traditional shell access for -// clients. -type Conn interface { - ConnMetadata - - // SendRequest sends a global request, and returns the - // reply. If wantReply is true, it returns the response status - // and payload. See also RFC 4254, section 4. - SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) - - // OpenChannel tries to open an channel. If the request is - // rejected, it returns *OpenChannelError. On success it returns - // the SSH Channel and a Go channel for incoming, out-of-band - // requests. The Go channel must be serviced, or the - // connection will hang. 
- OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) - - // Close closes the underlying network connection - Close() error - - // Wait blocks until the connection has shut down, and returns the - // error causing the shutdown. - Wait() error - - // TODO(hanwen): consider exposing: - // RequestKeyChange - // Disconnect -} - -// DiscardRequests consumes and rejects all requests from the -// passed-in channel. -func DiscardRequests(in <-chan *Request) { - for req := range in { - if req.WantReply { - req.Reply(false, nil) - } - } -} - -// A connection represents an incoming connection. -type connection struct { - transport *handshakeTransport - sshConn - - // The connection protocol. - *mux -} - -func (c *connection) Close() error { - return c.sshConn.conn.Close() -} - -// sshConn provides net.Conn metadata, but disallows direct reads and -// writes. -type sshConn struct { - conn net.Conn - - user string - sessionID []byte - clientVersion []byte - serverVersion []byte -} - -func dup(src []byte) []byte { - dst := make([]byte, len(src)) - copy(dst, src) - return dst -} - -func (c *sshConn) User() string { - return c.user -} - -func (c *sshConn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -func (c *sshConn) Close() error { - return c.conn.Close() -} - -func (c *sshConn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -func (c *sshConn) SessionID() []byte { - return dup(c.sessionID) -} - -func (c *sshConn) ClientVersion() []byte { - return dup(c.clientVersion) -} - -func (c *sshConn) ServerVersion() []byte { - return dup(c.serverVersion) -} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go deleted file mode 100644 index f6bff60d..00000000 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -/* -Package ssh implements an SSH client and server. - -SSH is a transport security protocol, an authentication protocol and a -family of application protocols. The most typical application level -protocol is a remote shell and this is specifically implemented. However, -the multiplexed nature of SSH is exposed to users that wish to support -others. - -References: - - [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD - [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 - -This package does not fall under the stability promise of the Go language itself, -so its API may be changed when pressing needs arise. -*/ -package ssh // import "golang.org/x/crypto/ssh" diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go deleted file mode 100644 index 07a1843e..00000000 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ /dev/null @@ -1,735 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/rand" - "errors" - "fmt" - "io" - "log" - "net" - "sync" -) - -// debugHandshake, if set, prints messages sent and received. Key -// exchange messages are printed as if DH were used, so the debug -// messages are wrong when using ECDH. -const debugHandshake = false - -// chanSize sets the amount of buffering SSH connections. This is -// primarily for testing: setting chanSize=0 uncovers deadlocks more -// quickly. -const chanSize = 16 - -// keyingTransport is a packet based transport that supports key -// changes. It need not be thread-safe. It should pass through -// msgNewKeys in both directions. -type keyingTransport interface { - packetConn - - // prepareKeyChange sets up a key change. 
The key change for a - // direction will be effected if a msgNewKeys message is sent - // or received. - prepareKeyChange(*algorithms, *kexResult) error -} - -// handshakeTransport implements rekeying on top of a keyingTransport -// and offers a thread-safe writePacket() interface. -type handshakeTransport struct { - conn keyingTransport - config *Config - - serverVersion []byte - clientVersion []byte - - // hostKeys is non-empty if we are the server. In that case, - // it contains all host keys that can be used to sign the - // connection. - hostKeys []Signer - - // hostKeyAlgorithms is non-empty if we are the client. In that case, - // we accept these key types from the server as host key. - hostKeyAlgorithms []string - - // On read error, incoming is closed, and readError is set. - incoming chan []byte - readError error - - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. - writePacketsLeft uint32 - writeBytesLeft int64 - - // If the read loop wants to schedule a kex, it pings this - // channel, and the write loop will send out a kex - // message. - requestKex chan struct{} - - // If the other side requests or confirms a kex, its kexInit - // packet is sent here for the write loop to find it. - startKex chan *pendingKex - kexLoopDone chan struct{} // closed (with writeError non-nil) when kexLoop exits - - // data for host key checking - hostKeyCallback HostKeyCallback - dialAddress string - remoteAddr net.Addr - - // bannerCallback is non-empty if we are the client and it has been set in - // ClientConfig. In that case it is called during the user authentication - // dance to handle a custom server's message. - bannerCallback BannerCallback - - // Algorithms agreed in the last key exchange. - algorithms *algorithms - - // Counters exclusively owned by readLoop. 
- readPacketsLeft uint32 - readBytesLeft int64 - - // The session ID or nil if first kex did not complete yet. - sessionID []byte -} - -type pendingKex struct { - otherInit []byte - done chan error -} - -func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { - t := &handshakeTransport{ - conn: conn, - serverVersion: serverVersion, - clientVersion: clientVersion, - incoming: make(chan []byte, chanSize), - requestKex: make(chan struct{}, 1), - startKex: make(chan *pendingKex), - kexLoopDone: make(chan struct{}), - - config: config, - } - t.resetReadThresholds() - t.resetWriteThresholds() - - // We always start with a mandatory key exchange. - t.requestKex <- struct{}{} - return t -} - -func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.dialAddress = dialAddr - t.remoteAddr = addr - t.hostKeyCallback = config.HostKeyCallback - t.bannerCallback = config.BannerCallback - if config.HostKeyAlgorithms != nil { - t.hostKeyAlgorithms = config.HostKeyAlgorithms - } else { - t.hostKeyAlgorithms = supportedHostKeyAlgos - } - go t.readLoop() - go t.kexLoop() - return t -} - -func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.hostKeys = config.hostKeys - go t.readLoop() - go t.kexLoop() - return t -} - -func (t *handshakeTransport) getSessionID() []byte { - return t.sessionID -} - -// waitSession waits for the session to be established. This should be -// the first thing to call after instantiating handshakeTransport. 
-func (t *handshakeTransport) waitSession() error { - p, err := t.readPacket() - if err != nil { - return err - } - if p[0] != msgNewKeys { - return fmt.Errorf("ssh: first packet should be msgNewKeys") - } - - return nil -} - -func (t *handshakeTransport) id() string { - if len(t.hostKeys) > 0 { - return "server" - } - return "client" -} - -func (t *handshakeTransport) printPacket(p []byte, write bool) { - action := "got" - if write { - action = "sent" - } - - if p[0] == msgChannelData || p[0] == msgChannelExtendedData { - log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) - } else { - msg, err := decode(p) - log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) - } -} - -func (t *handshakeTransport) readPacket() ([]byte, error) { - p, ok := <-t.incoming - if !ok { - return nil, t.readError - } - return p, nil -} - -func (t *handshakeTransport) readLoop() { - first := true - for { - p, err := t.readOnePacket(first) - first = false - if err != nil { - t.readError = err - close(t.incoming) - break - } - if p[0] == msgIgnore || p[0] == msgDebug { - continue - } - t.incoming <- p - } - - // Stop writers too. - t.recordWriteError(t.readError) - - // Unblock the writer should it wait for this. - close(t.startKex) - - // Don't close t.requestKex; it's also written to from writePacket. -} - -func (t *handshakeTransport) pushPacket(p []byte) error { - if debugHandshake { - t.printPacket(p, true) - } - return t.conn.writePacket(p) -} - -func (t *handshakeTransport) getWriteError() error { - t.mu.Lock() - defer t.mu.Unlock() - return t.writeError -} - -func (t *handshakeTransport) recordWriteError(err error) { - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError == nil && err != nil { - t.writeError = err - } -} - -func (t *handshakeTransport) requestKeyExchange() { - select { - case t.requestKex <- struct{}{}: - default: - // something already requested a kex, so do nothing. 
- } -} - -func (t *handshakeTransport) resetWriteThresholds() { - t.writePacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.writeBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.writeBytesLeft = t.algorithms.w.rekeyBytes() - } else { - t.writeBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) kexLoop() { - -write: - for t.getWriteError() == nil { - var request *pendingKex - var sent bool - - for request == nil || !sent { - var ok bool - select { - case request, ok = <-t.startKex: - if !ok { - break write - } - case <-t.requestKex: - break - } - - if !sent { - if err := t.sendKexInit(); err != nil { - t.recordWriteError(err) - break - } - sent = true - } - } - - if err := t.getWriteError(); err != nil { - if request != nil { - request.done <- err - } - break - } - - // We're not servicing t.requestKex, but that is OK: - // we never block on sending to t.requestKex. - - // We're not servicing t.startKex, but the remote end - // has just sent us a kexInitMsg, so it can't send - // another key change request, until we close the done - // channel on the pendingKex request. - - err := t.enterKeyExchange(request.otherInit) - - t.mu.Lock() - t.writeError = err - t.sentInitPacket = nil - t.sentInitMsg = nil - - t.resetWriteThresholds() - - // we have completed the key exchange. Since the - // reader is still blocked, it is safe to clear out - // the requestKex channel. This avoids the situation - // where: 1) we consumed our own request for the - // initial kex, and 2) the kex from the remote side - // caused another send on the requestKex channel, - clear: - for { - select { - case <-t.requestKex: - // - default: - break clear - } - } - - request.done <- t.writeError - - // kex finished. Push packets that we received while - // the kex was in progress. 
Don't look at t.startKex - // and don't increment writtenSinceKex: if we trigger - // another kex while we are still busy with the last - // one, things will become very confusing. - for _, p := range t.pendingPackets { - t.writeError = t.pushPacket(p) - if t.writeError != nil { - break - } - } - t.pendingPackets = t.pendingPackets[:0] - t.mu.Unlock() - } - - // Unblock reader. - t.conn.Close() - - // drain startKex channel. We don't service t.requestKex - // because nobody does blocking sends there. - for request := range t.startKex { - request.done <- t.getWriteError() - } - - // Mark that the loop is done so that Close can return. - close(t.kexLoopDone) -} - -// The protocol uses uint32 for packet counters, so we can't let them -// reach 1<<32. We will actually read and write more packets than -// this, though: the other side may send more packets, and after we -// hit this limit on writing we will send a few more packets for the -// key exchange itself. -const packetRekeyThreshold = (1 << 31) - -func (t *handshakeTransport) resetReadThresholds() { - t.readPacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.readBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.readBytesLeft = t.algorithms.r.rekeyBytes() - } else { - t.readBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { - p, err := t.conn.readPacket() - if err != nil { - return nil, err - } - - if t.readPacketsLeft > 0 { - t.readPacketsLeft-- - } else { - t.requestKeyExchange() - } - - if t.readBytesLeft > 0 { - t.readBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if debugHandshake { - t.printPacket(p, false) - } - - if first && p[0] != msgKexInit { - return nil, fmt.Errorf("ssh: first packet should be msgKexInit") - } - - if p[0] != msgKexInit { - return p, nil - } - - firstKex := t.sessionID == nil - - kex := pendingKex{ - done: make(chan error, 1), - otherInit: p, - } - t.startKex 
<- &kex - err = <-kex.done - - if debugHandshake { - log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) - } - - if err != nil { - return nil, err - } - - t.resetReadThresholds() - - // By default, a key exchange is hidden from higher layers by - // translating it into msgIgnore. - successPacket := []byte{msgIgnore} - if firstKex { - // sendKexInit() for the first kex waits for - // msgNewKeys so the authentication process is - // guaranteed to happen over an encrypted transport. - successPacket = []byte{msgNewKeys} - } - - return successPacket, nil -} - -// sendKexInit sends a key change message. -func (t *handshakeTransport) sendKexInit() error { - t.mu.Lock() - defer t.mu.Unlock() - if t.sentInitMsg != nil { - // kexInits may be sent either in response to the other side, - // or because our side wants to initiate a key change, so we - // may have already sent a kexInit. In that case, don't send a - // second kexInit. - return nil - } - - msg := &kexInitMsg{ - KexAlgos: t.config.KeyExchanges, - CiphersClientServer: t.config.Ciphers, - CiphersServerClient: t.config.Ciphers, - MACsClientServer: t.config.MACs, - MACsServerClient: t.config.MACs, - CompressionClientServer: supportedCompressions, - CompressionServerClient: supportedCompressions, - } - io.ReadFull(rand.Reader, msg.Cookie[:]) - - isServer := len(t.hostKeys) > 0 - if isServer { - for _, k := range t.hostKeys { - // If k is an AlgorithmSigner, presume it supports all signature algorithms - // associated with the key format. (Ideally AlgorithmSigner would have a - // method to advertise supported algorithms, but it doesn't. This means that - // adding support for a new algorithm is a breaking change, as we will - // immediately negotiate it even if existing implementations don't support - // it. If that ever happens, we'll have to figure something out.) - // If k is not an AlgorithmSigner, we can only assume it only supports the - // algorithms that matches the key format. 
(This means that Sign can't pick - // a different default.) - keyFormat := k.PublicKey().Type() - if _, ok := k.(AlgorithmSigner); ok { - msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...) - } else { - msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) - } - } - } else { - msg.ServerHostKeyAlgos = t.hostKeyAlgorithms - - // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what - // algorithms the server supports for public key authentication. See RFC - // 8308, Section 2.1. - if firstKeyExchange := t.sessionID == nil; firstKeyExchange { - msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1) - msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) - msg.KexAlgos = append(msg.KexAlgos, "ext-info-c") - } - } - - packet := Marshal(msg) - - // writePacket destroys the contents, so save a copy. - packetCopy := make([]byte, len(packet)) - copy(packetCopy, packet) - - if err := t.pushPacket(packetCopy); err != nil { - return err - } - - t.sentInitMsg = msg - t.sentInitPacket = packet - - return nil -} - -func (t *handshakeTransport) writePacket(p []byte) error { - switch p[0] { - case msgKexInit: - return errors.New("ssh: only handshakeTransport can send kexInit") - case msgNewKeys: - return errors.New("ssh: only handshakeTransport can send newKeys") - } - - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError != nil { - return t.writeError - } - - if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. 
- cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil - } - - if t.writeBytesLeft > 0 { - t.writeBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if t.writePacketsLeft > 0 { - t.writePacketsLeft-- - } else { - t.requestKeyExchange() - } - - if err := t.pushPacket(p); err != nil { - t.writeError = err - } - - return nil -} - -func (t *handshakeTransport) Close() error { - // Close the connection. This should cause the readLoop goroutine to wake up - // and close t.startKex, which will shut down kexLoop if running. - err := t.conn.Close() - - // Wait for the kexLoop goroutine to complete. - // At that point we know that the readLoop goroutine is complete too, - // because kexLoop itself waits for readLoop to close the startKex channel. - <-t.kexLoopDone - - return err -} - -func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { - if debugHandshake { - log.Printf("%s entered key exchange", t.id()) - } - - otherInit := &kexInitMsg{} - if err := Unmarshal(otherInitPacket, otherInit); err != nil { - return err - } - - magics := handshakeMagics{ - clientVersion: t.clientVersion, - serverVersion: t.serverVersion, - clientKexInit: otherInitPacket, - serverKexInit: t.sentInitPacket, - } - - clientInit := otherInit - serverInit := t.sentInitMsg - isClient := len(t.hostKeys) == 0 - if isClient { - clientInit, serverInit = serverInit, clientInit - - magics.clientKexInit = t.sentInitPacket - magics.serverKexInit = otherInitPacket - } - - var err error - t.algorithms, err = findAgreedAlgorithms(isClient, clientInit, serverInit) - if err != nil { - return err - } - - // We don't send FirstKexFollows, but we handle receiving it. - // - // RFC 4253 section 7 defines the kex and the agreement method for - // first_kex_packet_follows. 
It states that the guessed packet - // should be ignored if the "kex algorithm and/or the host - // key algorithm is guessed wrong (server and client have - // different preferred algorithm), or if any of the other - // algorithms cannot be agreed upon". The other algorithms have - // already been checked above so the kex algorithm and host key - // algorithm are checked here. - if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { - // other side sent a kex message for the wrong algorithm, - // which we have to ignore. - if _, err := t.conn.readPacket(); err != nil { - return err - } - } - - kex, ok := kexAlgoMap[t.algorithms.kex] - if !ok { - return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) - } - - var result *kexResult - if len(t.hostKeys) > 0 { - result, err = t.server(kex, &magics) - } else { - result, err = t.client(kex, &magics) - } - - if err != nil { - return err - } - - firstKeyExchange := t.sessionID == nil - if firstKeyExchange { - t.sessionID = result.H - } - result.SessionID = t.sessionID - - if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { - return err - } - if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { - return err - } - - // On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO - // message with the server-sig-algs extension if the client supports it. See - // RFC 8308, Sections 2.4 and 3.1. - if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") { - extInfo := &extInfoMsg{ - NumExtensions: 1, - Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)), - } - extInfo.Payload = appendInt(extInfo.Payload, len("server-sig-algs")) - extInfo.Payload = append(extInfo.Payload, "server-sig-algs"...) 
- extInfo.Payload = appendInt(extInfo.Payload, len(supportedPubKeyAuthAlgosList)) - extInfo.Payload = append(extInfo.Payload, supportedPubKeyAuthAlgosList...) - if err := t.conn.writePacket(Marshal(extInfo)); err != nil { - return err - } - } - - if packet, err := t.conn.readPacket(); err != nil { - return err - } else if packet[0] != msgNewKeys { - return unexpectedMessageError(msgNewKeys, packet[0]) - } - - return nil -} - -// algorithmSignerWrapper is an AlgorithmSigner that only supports the default -// key format algorithm. -// -// This is technically a violation of the AlgorithmSigner interface, but it -// should be unreachable given where we use this. Anyway, at least it returns an -// error instead of panicing or producing an incorrect signature. -type algorithmSignerWrapper struct { - Signer -} - -func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - if algorithm != underlyingAlgo(a.PublicKey().Type()) { - return nil, errors.New("ssh: internal error: algorithmSignerWrapper invoked with non-default algorithm") - } - return a.Sign(rand, data) -} - -func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { - for _, k := range hostKeys { - if algo == k.PublicKey().Type() { - return algorithmSignerWrapper{k} - } - k, ok := k.(AlgorithmSigner) - if !ok { - continue - } - for _, a := range algorithmsForKeyFormat(k.PublicKey().Type()) { - if algo == a { - return k - } - } - } - return nil -} - -func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { - hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey) - if hostKey == nil { - return nil, errors.New("ssh: internal error: negotiated unsupported signature type") - } - - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey) - return r, err -} - -func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { - result, err := 
kex.Client(t.conn, t.config.Rand, magics) - if err != nil { - return nil, err - } - - hostKey, err := ParsePublicKey(result.HostKey) - if err != nil { - return nil, err - } - - if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil { - return nil, err - } - - err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) - if err != nil { - return nil, err - } - - return result, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go deleted file mode 100644 index af81d266..00000000 --- a/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt_pbkdf implements bcrypt_pbkdf(3) from OpenBSD. -// -// See https://flak.tedunangst.com/post/bcrypt-pbkdf and -// https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c. -package bcrypt_pbkdf - -import ( - "crypto/sha512" - "errors" - "golang.org/x/crypto/blowfish" -) - -const blockSize = 32 - -// Key derives a key from the password, salt and rounds count, returning a -// []byte of length keyLen that can be used as cryptographic key. 
-func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) { - if rounds < 1 { - return nil, errors.New("bcrypt_pbkdf: number of rounds is too small") - } - if len(password) == 0 { - return nil, errors.New("bcrypt_pbkdf: empty password") - } - if len(salt) == 0 || len(salt) > 1<<20 { - return nil, errors.New("bcrypt_pbkdf: bad salt length") - } - if keyLen > 1024 { - return nil, errors.New("bcrypt_pbkdf: keyLen is too large") - } - - numBlocks := (keyLen + blockSize - 1) / blockSize - key := make([]byte, numBlocks*blockSize) - - h := sha512.New() - h.Write(password) - shapass := h.Sum(nil) - - shasalt := make([]byte, 0, sha512.Size) - cnt, tmp := make([]byte, 4), make([]byte, blockSize) - for block := 1; block <= numBlocks; block++ { - h.Reset() - h.Write(salt) - cnt[0] = byte(block >> 24) - cnt[1] = byte(block >> 16) - cnt[2] = byte(block >> 8) - cnt[3] = byte(block) - h.Write(cnt) - bcryptHash(tmp, shapass, h.Sum(shasalt)) - - out := make([]byte, blockSize) - copy(out, tmp) - for i := 2; i <= rounds; i++ { - h.Reset() - h.Write(tmp) - bcryptHash(tmp, shapass, h.Sum(shasalt)) - for j := 0; j < len(out); j++ { - out[j] ^= tmp[j] - } - } - - for i, v := range out { - key[i*numBlocks+(block-1)] = v - } - } - return key[:keyLen], nil -} - -var magic = []byte("OxychromaticBlowfishSwatDynamite") - -func bcryptHash(out, shapass, shasalt []byte) { - c, err := blowfish.NewSaltedCipher(shapass, shasalt) - if err != nil { - panic(err) - } - for i := 0; i < 64; i++ { - blowfish.ExpandKey(shasalt, c) - blowfish.ExpandKey(shapass, c) - } - copy(out, magic) - for i := 0; i < 32; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(out[i:i+8], out[i:i+8]) - } - } - // Swap bytes due to different endianness. 
- for i := 0; i < 32; i += 4 { - out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3] - } -} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go deleted file mode 100644 index 8a05f799..00000000 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ /dev/null @@ -1,786 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - - "golang.org/x/crypto/curve25519" -) - -const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" - kexAlgoDH16SHA512 = "diffie-hellman-group16-sha512" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org" - kexAlgoCurve25519SHA256 = "curve25519-sha256" - - // For the following kex only the client half contains a production - // ready implementation. The server half only consists of a minimal - // implementation to satisfy the automated tests. - kexAlgoDHGEXSHA1 = "diffie-hellman-group-exchange-sha1" - kexAlgoDHGEXSHA256 = "diffie-hellman-group-exchange-sha256" -) - -// kexResult captures the outcome of a key exchange. -type kexResult struct { - // Session hash. See also RFC 4253, section 8. - H []byte - - // Shared secret. See also RFC 4253, section 8. - K []byte - - // Host key as hashed into H. - HostKey []byte - - // Signature of H. - Signature []byte - - // A cryptographic hash function that matches the security - // level of the key exchange algorithm. It is used for - // calculating H, and for deriving keys from H and K. 
- Hash crypto.Hash - - // The session ID, which is the first H computed. This is used - // to derive key material inside the transport. - SessionID []byte -} - -// handshakeMagics contains data that is always included in the -// session hash. -type handshakeMagics struct { - clientVersion, serverVersion []byte - clientKexInit, serverKexInit []byte -} - -func (m *handshakeMagics) write(w io.Writer) { - writeString(w, m.clientVersion) - writeString(w, m.serverVersion) - writeString(w, m.clientKexInit) - writeString(w, m.serverKexInit) -} - -// kexAlgorithm abstracts different key exchange algorithms. -type kexAlgorithm interface { - // Server runs server-side key agreement, signing the result - // with a hostkey. algo is the negotiated algorithm, and may - // be a certificate type. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s AlgorithmSigner, algo string) (*kexResult, error) - - // Client runs the client-side key agreement. Caller is - // responsible for verifying the host key signature. - Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) -} - -// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. 
-type dhGroup struct { - g, p, pMinus1 *big.Int - hashFunc crypto.Hash -} - -func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil -} - -func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - var x *big.Int - for { - var err error - if x, err = rand.Int(randSource, group.pMinus1); err != nil { - return nil, err - } - if x.Sign() > 0 { - break - } - } - - X := new(big.Int).Exp(group.g, x, group.p) - kexDHInit := kexDHInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHInit)); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexDHReply kexDHReplyMsg - if err = Unmarshal(packet, &kexDHReply); err != nil { - return nil, err - } - - ki, err := group.diffieHellman(kexDHReply.Y, x) - if err != nil { - return nil, err - } - - h := group.hashFunc.New() - magics.write(h) - writeString(h, kexDHReply.HostKey) - writeInt(h, X) - writeInt(h, kexDHReply.Y) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHReply.HostKey, - Signature: kexDHReply.Signature, - Hash: group.hashFunc, - }, nil -} - -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHInit kexDHInitMsg - if err = Unmarshal(packet, &kexDHInit); err != nil { - return - } - - var y *big.Int - for { - if y, err = rand.Int(randSource, group.pMinus1); err != nil { - return - } - if y.Sign() > 0 { - break - } - } - - Y := new(big.Int).Exp(group.g, y, group.p) - ki, err := group.diffieHellman(kexDHInit.X, 
y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := group.hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeInt(h, kexDHInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, randSource, H, algo) - if err != nil { - return nil, err - } - - kexDHReply := kexDHReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHReply) - - err = c.writePacket(packet) - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: group.hashFunc, - }, err -} - -// ecdh performs Elliptic Curve Diffie-Hellman key exchange as -// described in RFC 5656, section 4. -type ecdh struct { - curve elliptic.Curve -} - -func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - kexInit := kexECDHInitMsg{ - ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), - } - - serialized := Marshal(&kexInit) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - - x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) - if err != nil { - return nil, err - } - - // generate shared secret - secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kexInit.ClientPubKey) - writeString(h, reply.EphemeralPubKey) - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), 
- K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: ecHash(kex.curve), - }, nil -} - -// unmarshalECKey parses and checks an EC key. -func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { - x, y = elliptic.Unmarshal(curve, pubkey) - if x == nil { - return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") - } - if !validateECPublicKey(curve, x, y) { - return nil, nil, errors.New("ssh: public key not on curve") - } - return x, y, nil -} - -// validateECPublicKey checks that the point is a valid public key for -// the given curve. See [SEC1], 3.2.2 -func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { - if x.Sign() == 0 && y.Sign() == 0 { - return false - } - - if x.Cmp(curve.Params().P) >= 0 { - return false - } - - if y.Cmp(curve.Params().P) >= 0 { - return false - } - - if !curve.IsOnCurve(x, y) { - return false - } - - // We don't check if N * PubKey == 0, since - // - // - the NIST curves have cofactor = 1, so this is implicit. - // (We don't foresee an implementation that supports non NIST - // curves) - // - // - for ephemeral keys, we don't need to worry about small - // subgroup attacks. - return true -} - -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexECDHInit kexECDHInitMsg - if err = Unmarshal(packet, &kexECDHInit); err != nil { - return nil, err - } - - clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) - if err != nil { - return nil, err - } - - // We could cache this key across multiple users/multiple - // connection attempts, but the benefit is small. OpenSSH - // generates a new key for each incoming connection. 
- ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) - - // generate shared secret - secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexECDHInit.ClientPubKey) - writeString(h, serializedEphKey) - - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H, algo) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: serializedEphKey, - HostKey: hostKeyBytes, - Signature: sig, - } - - serialized := Marshal(&reply) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - return &kexResult{ - H: H, - K: K, - HostKey: reply.HostKey, - Signature: sig, - Hash: ecHash(kex.curve), - }, nil -} - -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - -var kexAlgoMap = map[string]kexAlgorithm{} - -func init() { - // This is the group called diffie-hellman-group1-sha1 in - // RFC 4253 and Oakley Group 2 in RFC 2409. 
- p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) - kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - hashFunc: crypto.SHA1, - } - - // This are the groups called diffie-hellman-group14-sha1 and - // diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268, - // and Oakley Group 14 in RFC 3526. - p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - group14 := &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ - g: group14.g, p: group14.p, pMinus1: group14.pMinus1, - hashFunc: crypto.SHA1, - } - kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{ - g: group14.g, p: group14.p, pMinus1: group14.pMinus1, - hashFunc: crypto.SHA256, - } - - // This is the group called diffie-hellman-group16-sha512 in RFC - // 8268 and Oakley Group 16 in RFC 3526. 
- p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF", 16) - - kexAlgoMap[kexAlgoDH16SHA512] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - hashFunc: crypto.SHA512, - } - - kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} - kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} - kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} - kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} - kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] = &curve25519sha256{} - kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} - kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} -} - -// curve25519sha256 implements the curve25519-sha256 (formerly known as -// curve25519-sha256@libssh.org) key exchange method, as described in RFC 8731. 
-type curve25519sha256 struct{} - -type curve25519KeyPair struct { - priv [32]byte - pub [32]byte -} - -func (kp *curve25519KeyPair) generate(rand io.Reader) error { - if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { - return err - } - curve25519.ScalarBaseMult(&kp.pub, &kp.priv) - return nil -} - -// curve25519Zeros is just an array of 32 zero bytes so that we have something -// convenient to compare against in order to reject curve25519 points with the -// wrong order. -var curve25519Zeros [32]byte - -func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - if len(reply.EphemeralPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var servPub, secret [32]byte - copy(servPub[:], reply.EphemeralPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &servPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kp.pub[:]) - writeString(h, reply.EphemeralPubKey) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: crypto.SHA256, - }, nil -} - -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { - packet, err := c.readPacket() - 
if err != nil { - return - } - var kexInit kexECDHInitMsg - if err = Unmarshal(packet, &kexInit); err != nil { - return - } - - if len(kexInit.ClientPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - - var clientPub, secret [32]byte - copy(clientPub[:], kexInit.ClientPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &clientPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexInit.ClientPubKey) - writeString(h, kp.pub[:]) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - sig, err := signAndMarshal(priv, rand, H, algo) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: kp.pub[:], - HostKey: hostKeyBytes, - Signature: sig, - } - if err := c.writePacket(Marshal(&reply)); err != nil { - return nil, err - } - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA256, - }, nil -} - -// dhGEXSHA implements the diffie-hellman-group-exchange-sha1 and -// diffie-hellman-group-exchange-sha256 key agreement protocols, -// as described in RFC 4419 -type dhGEXSHA struct { - hashFunc crypto.Hash -} - -const ( - dhGroupExchangeMinimumBits = 2048 - dhGroupExchangePreferredBits = 2048 - dhGroupExchangeMaximumBits = 8192 -) - -func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - // Send GexRequest - kexDHGexRequest := kexDHGexRequestMsg{ - MinBits: dhGroupExchangeMinimumBits, - PreferedBits: dhGroupExchangePreferredBits, - MaxBits: dhGroupExchangeMaximumBits, - } - if err 
:= c.writePacket(Marshal(&kexDHGexRequest)); err != nil { - return nil, err - } - - // Receive GexGroup - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var msg kexDHGexGroupMsg - if err = Unmarshal(packet, &msg); err != nil { - return nil, err - } - - // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits - if msg.P.BitLen() < dhGroupExchangeMinimumBits || msg.P.BitLen() > dhGroupExchangeMaximumBits { - return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", msg.P.BitLen()) - } - - // Check if g is safe by verifying that 1 < g < p-1 - pMinusOne := new(big.Int).Sub(msg.P, bigOne) - if msg.G.Cmp(bigOne) <= 0 || msg.G.Cmp(pMinusOne) >= 0 { - return nil, fmt.Errorf("ssh: server provided gex g is not safe") - } - - // Send GexInit - pHalf := new(big.Int).Rsh(msg.P, 1) - x, err := rand.Int(randSource, pHalf) - if err != nil { - return nil, err - } - X := new(big.Int).Exp(msg.G, x, msg.P) - kexDHGexInit := kexDHGexInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHGexInit)); err != nil { - return nil, err - } - - // Receive GexReply - packet, err = c.readPacket() - if err != nil { - return nil, err - } - - var kexDHGexReply kexDHGexReplyMsg - if err = Unmarshal(packet, &kexDHGexReply); err != nil { - return nil, err - } - - if kexDHGexReply.Y.Cmp(bigOne) <= 0 || kexDHGexReply.Y.Cmp(pMinusOne) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - kInt := new(big.Int).Exp(kexDHGexReply.Y, x, msg.P) - - // Check if k is safe by verifying that k > 1 and k < p - 1 - if kInt.Cmp(bigOne) <= 0 || kInt.Cmp(pMinusOne) >= 0 { - return nil, fmt.Errorf("ssh: derived k is not safe") - } - - h := gex.hashFunc.New() - magics.write(h) - writeString(h, kexDHGexReply.HostKey) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) - binary.Write(h, binary.BigEndian, 
uint32(dhGroupExchangeMaximumBits)) - writeInt(h, msg.P) - writeInt(h, msg.G) - writeInt(h, X) - writeInt(h, kexDHGexReply.Y) - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHGexReply.HostKey, - Signature: kexDHGexReply.Signature, - Hash: gex.hashFunc, - }, nil -} - -// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. -// -// This is a minimal implementation to satisfy the automated tests. -func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { - // Receive GexRequest - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHGexRequest kexDHGexRequestMsg - if err = Unmarshal(packet, &kexDHGexRequest); err != nil { - return - } - - // Send GexGroup - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. 
- p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - g := big.NewInt(2) - - msg := &kexDHGexGroupMsg{ - P: p, - G: g, - } - if err := c.writePacket(Marshal(msg)); err != nil { - return nil, err - } - - // Receive GexInit - packet, err = c.readPacket() - if err != nil { - return - } - var kexDHGexInit kexDHGexInitMsg - if err = Unmarshal(packet, &kexDHGexInit); err != nil { - return - } - - pHalf := new(big.Int).Rsh(p, 1) - - y, err := rand.Int(randSource, pHalf) - if err != nil { - return - } - Y := new(big.Int).Exp(g, y, p) - - pMinusOne := new(big.Int).Sub(p, bigOne) - if kexDHGexInit.X.Cmp(bigOne) <= 0 || kexDHGexInit.X.Cmp(pMinusOne) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - kInt := new(big.Int).Exp(kexDHGexInit.X, y, p) - - hostKeyBytes := priv.PublicKey().Marshal() - - h := gex.hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, p) - writeInt(h, g) - writeInt(h, kexDHGexInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. 
- sig, err := signAndMarshal(priv, randSource, H, algo) - if err != nil { - return nil, err - } - - kexDHGexReply := kexDHGexReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHGexReply) - - err = c.writePacket(packet) - - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: gex.hashFunc, - }, err -} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go deleted file mode 100644 index dac8ee72..00000000 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ /dev/null @@ -1,1447 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "crypto" - "crypto/aes" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/md5" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" - "strings" - - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" -) - -// Public key algorithms names. These values can appear in PublicKey.Type, -// ClientConfig.HostKeyAlgorithms, Signature.Format, or as AlgorithmSigner -// arguments. -const ( - KeyAlgoRSA = "ssh-rsa" - KeyAlgoDSA = "ssh-dss" - KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" - KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" - KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" - KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" - KeyAlgoED25519 = "ssh-ed25519" - KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" - - // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, not - // public key formats, so they can't appear as a PublicKey.Type. The - // corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2. 
- KeyAlgoRSASHA256 = "rsa-sha2-256" - KeyAlgoRSASHA512 = "rsa-sha2-512" -) - -const ( - // Deprecated: use KeyAlgoRSA. - SigAlgoRSA = KeyAlgoRSA - // Deprecated: use KeyAlgoRSASHA256. - SigAlgoRSASHA2256 = KeyAlgoRSASHA256 - // Deprecated: use KeyAlgoRSASHA512. - SigAlgoRSASHA2512 = KeyAlgoRSASHA512 -) - -// parsePubKey parses a public key of the given algorithm. -// Use ParsePublicKey for keys with prepended algorithm. -func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { - switch algo { - case KeyAlgoRSA: - return parseRSA(in) - case KeyAlgoDSA: - return parseDSA(in) - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - return parseECDSA(in) - case KeyAlgoSKECDSA256: - return parseSKECDSA(in) - case KeyAlgoED25519: - return parseED25519(in) - case KeyAlgoSKED25519: - return parseSKEd25519(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - cert, err := parseCert(in, certKeyAlgoNames[algo]) - if err != nil { - return nil, nil, err - } - return cert, nil, nil - } - return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) -} - -// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format -// (see sshd(8) manual page) once the options and key type fields have been -// removed. -func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { - in = bytes.TrimSpace(in) - - i := bytes.IndexAny(in, " \t") - if i == -1 { - i = len(in) - } - base64Key := in[:i] - - key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) - n, err := base64.StdEncoding.Decode(key, base64Key) - if err != nil { - return nil, "", err - } - key = key[:n] - out, err = ParsePublicKey(key) - if err != nil { - return nil, "", err - } - comment = string(bytes.TrimSpace(in[i:])) - return out, comment, nil -} - -// ParseKnownHosts parses an entry in the format of the known_hosts file. 
-// -// The known_hosts format is documented in the sshd(8) manual page. This -// function will parse a single entry from in. On successful return, marker -// will contain the optional marker value (i.e. "cert-authority" or "revoked") -// or else be empty, hosts will contain the hosts that this entry matches, -// pubKey will contain the public key and comment will contain any trailing -// comment at the end of the line. See the sshd(8) manual page for the various -// forms that a host string can take. -// -// The unparsed remainder of the input will be returned in rest. This function -// can be called repeatedly to parse multiple entries. -// -// If no entries were found in the input then err will be io.EOF. Otherwise a -// non-nil err value indicates a parse error. -func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - // Strip out the beginning of the known_host key. - // This is either an optional marker or a (set of) hostname(s). - keyFields := bytes.Fields(in) - if len(keyFields) < 3 || len(keyFields) > 5 { - return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") - } - - // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated - // list of hosts - marker := "" - if keyFields[0][0] == '@' { - marker = string(keyFields[0][1:]) - keyFields = keyFields[1:] - } - - hosts := string(keyFields[0]) - // keyFields[1] contains the key type (e.g. “ssh-rsa”). - // However, that information is duplicated inside the - // base64-encoded key and so is ignored here. 
- - key := bytes.Join(keyFields[2:], []byte(" ")) - if pubKey, comment, err = parseAuthorizedKey(key); err != nil { - return "", nil, nil, "", nil, err - } - - return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil - } - - return "", nil, nil, "", nil, io.EOF -} - -// ParseAuthorizedKey parses a public key from an authorized_keys -// file used in OpenSSH according to the sshd(8) manual page. -func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - return out, comment, options, rest, nil - } - - // No key type recognised. Maybe there's an options field at - // the beginning. 
- var b byte - inQuote := false - var candidateOptions []string - optionStart := 0 - for i, b = range in { - isEnd := !inQuote && (b == ' ' || b == '\t') - if (b == ',' && !inQuote) || isEnd { - if i-optionStart > 0 { - candidateOptions = append(candidateOptions, string(in[optionStart:i])) - } - optionStart = i + 1 - } - if isEnd { - break - } - if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { - inQuote = !inQuote - } - } - for i < len(in) && (in[i] == ' ' || in[i] == '\t') { - i++ - } - if i == len(in) { - // Invalid line: unmatched quote - in = rest - continue - } - - in = in[i:] - i = bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - options = candidateOptions - return out, comment, options, rest, nil - } - - in = rest - continue - } - - return nil, "", nil, nil, errors.New("ssh: no key found") -} - -// ParsePublicKey parses an SSH public key formatted for use in -// the SSH wire protocol according to RFC 4253, section 6.6. -func ParsePublicKey(in []byte) (out PublicKey, err error) { - algo, in, ok := parseString(in) - if !ok { - return nil, errShortRead - } - var rest []byte - out, rest, err = parsePubKey(in, string(algo)) - if len(rest) > 0 { - return nil, errors.New("ssh: trailing junk in public key") - } - - return out, err -} - -// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH -// authorized_keys file. The return value ends with newline. -func MarshalAuthorizedKey(key PublicKey) []byte { - b := &bytes.Buffer{} - b.WriteString(key.Type()) - b.WriteByte(' ') - e := base64.NewEncoder(base64.StdEncoding, b) - e.Write(key.Marshal()) - e.Close() - b.WriteByte('\n') - return b.Bytes() -} - -// PublicKey represents a public key using an unspecified algorithm. -// -// Some PublicKeys provided by this package also implement CryptoPublicKey. -type PublicKey interface { - // Type returns the key format name, e.g. "ssh-rsa". 
- Type() string - - // Marshal returns the serialized key data in SSH wire format, with the name - // prefix. To unmarshal the returned data, use the ParsePublicKey function. - Marshal() []byte - - // Verify that sig is a signature on the given data using this key. This - // method will hash the data appropriately first. sig.Format is allowed to - // be any signature algorithm compatible with the key type, the caller - // should check if it has more stringent requirements. - Verify(data []byte, sig *Signature) error -} - -// CryptoPublicKey, if implemented by a PublicKey, -// returns the underlying crypto.PublicKey form of the key. -type CryptoPublicKey interface { - CryptoPublicKey() crypto.PublicKey -} - -// A Signer can create signatures that verify against a public key. -// -// Some Signers provided by this package also implement AlgorithmSigner. -type Signer interface { - // PublicKey returns the associated PublicKey. - PublicKey() PublicKey - - // Sign returns a signature for the given data. This method will hash the - // data appropriately first. The signature algorithm is expected to match - // the key format returned by the PublicKey.Type method (and not to be any - // alternative algorithm supported by the key format). - Sign(rand io.Reader, data []byte) (*Signature, error) -} - -// An AlgorithmSigner is a Signer that also supports specifying an algorithm to -// use for signing. -// -// An AlgorithmSigner can't advertise the algorithms it supports, so it should -// be prepared to be invoked with every algorithm supported by the public key -// format. -type AlgorithmSigner interface { - Signer - - // SignWithAlgorithm is like Signer.Sign, but allows specifying a desired - // signing algorithm. Callers may pass an empty string for the algorithm in - // which case the AlgorithmSigner will use a default algorithm. This default - // doesn't currently control any behavior in this package. 
- SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) -} - -type rsaPublicKey rsa.PublicKey - -func (r *rsaPublicKey) Type() string { - return "ssh-rsa" -} - -// parseRSA parses an RSA key according to RFC 4253, section 6.6. -func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - E *big.Int - N *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if w.E.BitLen() > 24 { - return nil, nil, errors.New("ssh: exponent too large") - } - e := w.E.Int64() - if e < 3 || e&1 == 0 { - return nil, nil, errors.New("ssh: incorrect exponent") - } - - var key rsa.PublicKey - key.E = int(e) - key.N = w.N - return (*rsaPublicKey)(&key), w.Rest, nil -} - -func (r *rsaPublicKey) Marshal() []byte { - e := new(big.Int).SetInt64(int64(r.E)) - // RSA publickey struct layout should match the struct used by - // parseRSACert in the x/crypto/ssh/agent package. - wirekey := struct { - Name string - E *big.Int - N *big.Int - }{ - KeyAlgoRSA, - e, - r.N, - } - return Marshal(&wirekey) -} - -func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - supportedAlgos := algorithmsForKeyFormat(r.Type()) - if !contains(supportedAlgos, sig.Format) { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) - } - hash := hashFuncs[sig.Format] - h := hash.New() - h.Write(data) - digest := h.Sum(nil) - return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob) -} - -func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*rsa.PublicKey)(r) -} - -type dsaPublicKey dsa.PublicKey - -func (k *dsaPublicKey) Type() string { - return "ssh-dss" -} - -func checkDSAParams(param *dsa.Parameters) error { - // SSH specifies FIPS 186-2, which only provided a single size - // (1024 bits) DSA key. FIPS 186-3 allows for larger key - // sizes, which would confuse SSH. 
- if l := param.P.BitLen(); l != 1024 { - return fmt.Errorf("ssh: unsupported DSA key size %d", l) - } - - return nil -} - -// parseDSA parses an DSA key according to RFC 4253, section 6.6. -func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - P, Q, G, Y *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - param := dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - } - if err := checkDSAParams(¶m); err != nil { - return nil, nil, err - } - - key := &dsaPublicKey{ - Parameters: param, - Y: w.Y, - } - return key, w.Rest, nil -} - -func (k *dsaPublicKey) Marshal() []byte { - // DSA publickey struct layout should match the struct used by - // parseDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - P, Q, G, Y *big.Int - }{ - k.Type(), - k.P, - k.Q, - k.G, - k.Y, - } - - return Marshal(&w) -} - -func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - h := hashFuncs[sig.Format].New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 4253, section 6.6, - // The value for 'dss_signature_blob' is encoded as a string containing - // r, followed by s (which are 160-bit integers, without lengths or - // padding, unsigned, and in network byte order). - // For DSS purposes, sig.Blob should be exactly 40 bytes in length. 
- if len(sig.Blob) != 40 { - return errors.New("ssh: DSA signature parse error") - } - r := new(big.Int).SetBytes(sig.Blob[:20]) - s := new(big.Int).SetBytes(sig.Blob[20:]) - if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*dsa.PublicKey)(k) -} - -type dsaPrivateKey struct { - *dsa.PrivateKey -} - -func (k *dsaPrivateKey) PublicKey() PublicKey { - return (*dsaPublicKey)(&k.PrivateKey.PublicKey) -} - -func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - return k.SignWithAlgorithm(rand, data, k.PublicKey().Type()) -} - -func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - if algorithm != "" && algorithm != k.PublicKey().Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - - h := hashFuncs[k.PublicKey().Type()].New() - h.Write(data) - digest := h.Sum(nil) - r, s, err := dsa.Sign(rand, k.PrivateKey, digest) - if err != nil { - return nil, err - } - - sig := make([]byte, 40) - rb := r.Bytes() - sb := s.Bytes() - - copy(sig[20-len(rb):20], rb) - copy(sig[40-len(sb):], sb) - - return &Signature{ - Format: k.PublicKey().Type(), - Blob: sig, - }, nil -} - -type ecdsaPublicKey ecdsa.PublicKey - -func (k *ecdsaPublicKey) Type() string { - return "ecdsa-sha2-" + k.nistID() -} - -func (k *ecdsaPublicKey) nistID() string { - switch k.Params().BitSize { - case 256: - return "nistp256" - case 384: - return "nistp384" - case 521: - return "nistp521" - } - panic("ssh: unsupported ecdsa key size") -} - -type ed25519PublicKey ed25519.PublicKey - -func (k ed25519PublicKey) Type() string { - return KeyAlgoED25519 -} - -func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, 
nil, err - } - - if l := len(w.KeyBytes); l != ed25519.PublicKeySize { - return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - return ed25519PublicKey(w.KeyBytes), w.Rest, nil -} - -func (k ed25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - }{ - KeyAlgoED25519, - []byte(k), - } - return Marshal(&w) -} - -func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - if l := len(k); l != ed25519.PublicKeySize { - return fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) - } - - if ok := ed25519.Verify(ed25519.PublicKey(k), b, sig.Blob); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { - return ed25519.PublicKey(k) -} - -func supportedEllipticCurve(curve elliptic.Curve) bool { - return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() -} - -// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. -func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(ecdsa.PublicKey) - - switch w.Curve { - case "nistp256": - key.Curve = elliptic.P256() - case "nistp384": - key.Curve = elliptic.P384() - case "nistp521": - key.Curve = elliptic.P521() - default: - return nil, nil, errors.New("ssh: unsupported curve") - } - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - return (*ecdsaPublicKey)(key), w.Rest, nil -} - -func (k *ecdsaPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. 
- keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - // ECDSA publickey struct layout should match the struct used by - // parseECDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - ID string - Key []byte - }{ - k.Type(), - k.nistID(), - keyBytes, - } - - return Marshal(&w) -} - -func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := hashFuncs[sig.Format].New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 5656, section 3.1.2, - // The ecdsa_signature_blob value has the following specific encoding: - // mpint r - // mpint s - var ecSig struct { - R *big.Int - S *big.Int - } - - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*ecdsa.PublicKey)(k) -} - -// skFields holds the additional fields present in U2F/FIDO2 signatures. -// See openssh/PROTOCOL.u2f 'SSH U2F Signatures' for details. -type skFields struct { - // Flags contains U2F/FIDO2 flags such as 'user present' - Flags byte - // Counter is a monotonic signature counter which can be - // used to detect concurrent use of a private key, should - // it be extracted from hardware. - Counter uint32 -} - -type skECDSAPublicKey struct { - // application is a URL-like string, typically "ssh:" for SSH. - // see openssh/PROTOCOL.u2f for details. 
- application string - ecdsa.PublicKey -} - -func (k *skECDSAPublicKey) Type() string { - return KeyAlgoSKECDSA256 -} - -func (k *skECDSAPublicKey) nistID() string { - return "nistp256" -} - -func parseSKECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Application string - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(skECDSAPublicKey) - key.application = w.Application - - if w.Curve != "nistp256" { - return nil, nil, errors.New("ssh: unsupported curve") - } - key.Curve = elliptic.P256() - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - - return key, w.Rest, nil -} - -func (k *skECDSAPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. - keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - w := struct { - Name string - ID string - Key []byte - Application string - }{ - k.Type(), - k.nistID(), - keyBytes, - k.application, - } - - return Marshal(&w) -} - -func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := hashFuncs[sig.Format].New() - h.Write([]byte(k.application)) - appDigest := h.Sum(nil) - - h.Reset() - h.Write(data) - dataDigest := h.Sum(nil) - - var ecSig struct { - R *big.Int - S *big.Int - } - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - var skf skFields - if err := Unmarshal(sig.Rest, &skf); err != nil { - return err - } - - blob := struct { - ApplicationDigest []byte `ssh:"rest"` - Flags byte - Counter uint32 - MessageDigest []byte `ssh:"rest"` - }{ - appDigest, - skf.Flags, - skf.Counter, - dataDigest, - } - - original := Marshal(blob) - - h.Reset() - h.Write(original) - digest := h.Sum(nil) - - if 
ecdsa.Verify((*ecdsa.PublicKey)(&k.PublicKey), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -type skEd25519PublicKey struct { - // application is a URL-like string, typically "ssh:" for SSH. - // see openssh/PROTOCOL.u2f for details. - application string - ed25519.PublicKey -} - -func (k *skEd25519PublicKey) Type() string { - return KeyAlgoSKED25519 -} - -func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Application string - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if l := len(w.KeyBytes); l != ed25519.PublicKeySize { - return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - key := new(skEd25519PublicKey) - key.application = w.Application - key.PublicKey = ed25519.PublicKey(w.KeyBytes) - - return key, w.Rest, nil -} - -func (k *skEd25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - Application string - }{ - KeyAlgoSKED25519, - []byte(k.PublicKey), - k.application, - } - return Marshal(&w) -} - -func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - if l := len(k.PublicKey); l != ed25519.PublicKeySize { - return fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - h := hashFuncs[sig.Format].New() - h.Write([]byte(k.application)) - appDigest := h.Sum(nil) - - h.Reset() - h.Write(data) - dataDigest := h.Sum(nil) - - var edSig struct { - Signature []byte `ssh:"rest"` - } - - if err := Unmarshal(sig.Blob, &edSig); err != nil { - return err - } - - var skf skFields - if err := Unmarshal(sig.Rest, &skf); err != nil { - return err - } - - blob := struct { - ApplicationDigest []byte `ssh:"rest"` - Flags byte - Counter uint32 - MessageDigest []byte `ssh:"rest"` - }{ - appDigest, - skf.Flags, 
- skf.Counter, - dataDigest, - } - - original := Marshal(blob) - - if ok := ed25519.Verify(k.PublicKey, original, edSig.Signature); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, -// *ecdsa.PrivateKey or any other crypto.Signer and returns a -// corresponding Signer instance. ECDSA keys must use P-256, P-384 or -// P-521. DSA keys must use parameter size L1024N160. -func NewSignerFromKey(key interface{}) (Signer, error) { - switch key := key.(type) { - case crypto.Signer: - return NewSignerFromSigner(key) - case *dsa.PrivateKey: - return newDSAPrivateKey(key) - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { - if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { - return nil, err - } - - return &dsaPrivateKey{key}, nil -} - -type wrappedSigner struct { - signer crypto.Signer - pubKey PublicKey -} - -// NewSignerFromSigner takes any crypto.Signer implementation and -// returns a corresponding Signer interface. This can be used, for -// example, with keys kept in hardware modules. 
-func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { - pubKey, err := NewPublicKey(signer.Public()) - if err != nil { - return nil, err - } - - return &wrappedSigner{signer, pubKey}, nil -} - -func (s *wrappedSigner) PublicKey() PublicKey { - return s.pubKey -} - -func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.SignWithAlgorithm(rand, data, s.pubKey.Type()) -} - -func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - if algorithm == "" { - algorithm = s.pubKey.Type() - } - - supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type()) - if !contains(supportedAlgos, algorithm) { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) - } - - hashFunc := hashFuncs[algorithm] - var digest []byte - if hashFunc != 0 { - h := hashFunc.New() - h.Write(data) - digest = h.Sum(nil) - } else { - digest = data - } - - signature, err := s.signer.Sign(rand, digest, hashFunc) - if err != nil { - return nil, err - } - - // crypto.Signer.Sign is expected to return an ASN.1-encoded signature - // for ECDSA and DSA, but that's not the encoding expected by SSH, so - // re-encode. - switch s.pubKey.(type) { - case *ecdsaPublicKey, *dsaPublicKey: - type asn1Signature struct { - R, S *big.Int - } - asn1Sig := new(asn1Signature) - _, err := asn1.Unmarshal(signature, asn1Sig) - if err != nil { - return nil, err - } - - switch s.pubKey.(type) { - case *ecdsaPublicKey: - signature = Marshal(asn1Sig) - - case *dsaPublicKey: - signature = make([]byte, 40) - r := asn1Sig.R.Bytes() - s := asn1Sig.S.Bytes() - copy(signature[20-len(r):20], r) - copy(signature[40-len(s):40], s) - } - } - - return &Signature{ - Format: algorithm, - Blob: signature, - }, nil -} - -// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, -// or ed25519.PublicKey returns a corresponding PublicKey instance. 
-// ECDSA keys must use P-256, P-384 or P-521. -func NewPublicKey(key interface{}) (PublicKey, error) { - switch key := key.(type) { - case *rsa.PublicKey: - return (*rsaPublicKey)(key), nil - case *ecdsa.PublicKey: - if !supportedEllipticCurve(key.Curve) { - return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") - } - return (*ecdsaPublicKey)(key), nil - case *dsa.PublicKey: - return (*dsaPublicKey)(key), nil - case ed25519.PublicKey: - if l := len(key); l != ed25519.PublicKeySize { - return nil, fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) - } - return ed25519PublicKey(key), nil - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports -// the same keys as ParseRawPrivateKey. If the private key is encrypted, it -// will return a PassphraseMissingError. -func ParsePrivateKey(pemBytes []byte) (Signer, error) { - key, err := ParseRawPrivateKey(pemBytes) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private -// key and passphrase. It supports the same keys as -// ParseRawPrivateKeyWithPassphrase. -func ParsePrivateKeyWithPassphrase(pemBytes, passphrase []byte) (Signer, error) { - key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// encryptedBlock tells whether a private key is -// encrypted by examining its Proc-Type header -// for a mention of ENCRYPTED -// according to RFC 1421 Section 4.6.1.1. -func encryptedBlock(block *pem.Block) bool { - return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") -} - -// A PassphraseMissingError indicates that parsing this private key requires a -// passphrase. Use ParsePrivateKeyWithPassphrase. 
-type PassphraseMissingError struct { - // PublicKey will be set if the private key format includes an unencrypted - // public key along with the encrypted private key. - PublicKey PublicKey -} - -func (*PassphraseMissingError) Error() string { - return "ssh: this private key is passphrase protected" -} - -// ParseRawPrivateKey returns a private key from a PEM encoded private key. It supports -// RSA, DSA, ECDSA, and Ed25519 private keys in PKCS#1, PKCS#8, OpenSSL, and OpenSSH -// formats. If the private key is encrypted, it will return a PassphraseMissingError. -func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if encryptedBlock(block) { - return nil, &PassphraseMissingError{} - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(block.Bytes) - // RFC5208 - https://tools.ietf.org/html/rfc5208 - case "PRIVATE KEY": - return x509.ParsePKCS8PrivateKey(block.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(block.Bytes) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(block.Bytes) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(block.Bytes, unencryptedOpenSSHKey) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with -// passphrase from a PEM encoded private key. If the passphrase is wrong, it -// will return x509.IncorrectPasswordError. 
-func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if block.Type == "OPENSSH PRIVATE KEY" { - return parseOpenSSHPrivateKey(block.Bytes, passphraseProtectedOpenSSHKey(passphrase)) - } - - if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) { - return nil, errors.New("ssh: not an encrypted key") - } - - buf, err := x509.DecryptPEMBlock(block, passphrase) - if err != nil { - if err == x509.IncorrectPasswordError { - return nil, err - } - return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(buf) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(buf) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(buf) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as -// specified by the OpenSSL DSA man page. 
-func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { - var k struct { - Version int - P *big.Int - Q *big.Int - G *big.Int - Pub *big.Int - Priv *big.Int - } - rest, err := asn1.Unmarshal(der, &k) - if err != nil { - return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) - } - if len(rest) > 0 { - return nil, errors.New("ssh: garbage after DSA key") - } - - return &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Pub, - }, - X: k.Priv, - }, nil -} - -func unencryptedOpenSSHKey(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { - if kdfName != "none" || cipherName != "none" { - return nil, &PassphraseMissingError{} - } - if kdfOpts != "" { - return nil, errors.New("ssh: invalid openssh private key") - } - return privKeyBlock, nil -} - -func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { - return func(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { - if kdfName == "none" || cipherName == "none" { - return nil, errors.New("ssh: key is not password protected") - } - if kdfName != "bcrypt" { - return nil, fmt.Errorf("ssh: unknown KDF %q, only supports %q", kdfName, "bcrypt") - } - - var opts struct { - Salt string - Rounds uint32 - } - if err := Unmarshal([]byte(kdfOpts), &opts); err != nil { - return nil, err - } - - k, err := bcrypt_pbkdf.Key(passphrase, []byte(opts.Salt), int(opts.Rounds), 32+16) - if err != nil { - return nil, err - } - key, iv := k[:32], k[32:] - - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - switch cipherName { - case "aes256-ctr": - ctr := cipher.NewCTR(c, iv) - ctr.XORKeyStream(privKeyBlock, privKeyBlock) - case "aes256-cbc": - if len(privKeyBlock)%c.BlockSize() != 0 { - return nil, fmt.Errorf("ssh: invalid encrypted private key length, not a multiple of the block size") - } - cbc := cipher.NewCBCDecrypter(c, iv) - cbc.CryptBlocks(privKeyBlock, 
privKeyBlock) - default: - return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q or %q", cipherName, "aes256-ctr", "aes256-cbc") - } - - return privKeyBlock, nil - } -} - -type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) - -// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt -// function to unwrap the encrypted portion. unencryptedOpenSSHKey can be used -// as the decrypt function to parse an unencrypted private key. See -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key. -func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) { - const magic = "openssh-key-v1\x00" - if len(key) < len(magic) || string(key[:len(magic)]) != magic { - return nil, errors.New("ssh: invalid openssh private key format") - } - remaining := key[len(magic):] - - var w struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte - } - - if err := Unmarshal(remaining, &w); err != nil { - return nil, err - } - if w.NumKeys != 1 { - // We only support single key files, and so does OpenSSH. 
- // https://github.com/openssh/openssh-portable/blob/4103a3ec7/sshkey.c#L4171 - return nil, errors.New("ssh: multi-key files are not supported") - } - - privKeyBlock, err := decrypt(w.CipherName, w.KdfName, w.KdfOpts, w.PrivKeyBlock) - if err != nil { - if err, ok := err.(*PassphraseMissingError); ok { - pub, errPub := ParsePublicKey(w.PubKey) - if errPub != nil { - return nil, fmt.Errorf("ssh: failed to parse embedded public key: %v", errPub) - } - err.PublicKey = pub - } - return nil, err - } - - pk1 := struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` - }{} - - if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { - if w.CipherName != "none" { - return nil, x509.IncorrectPasswordError - } - return nil, errors.New("ssh: malformed OpenSSH key") - } - - switch pk1.Keytype { - case KeyAlgoRSA: - // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 - key := struct { - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int - P *big.Int - Q *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - pk := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: key.N, - E: int(key.E.Int64()), - }, - D: key.D, - Primes: []*big.Int{key.P, key.Q}, - } - - if err := pk.Validate(); err != nil { - return nil, err - } - - pk.Precompute() - - return pk, nil - case KeyAlgoED25519: - key := struct { - Pub []byte - Priv []byte - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if len(key.Priv) != ed25519.PrivateKeySize { - return nil, errors.New("ssh: private key unexpected length") - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) - copy(pk, key.Priv) - return 
&pk, nil - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - key := struct { - Curve string - Pub []byte - D *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - var curve elliptic.Curve - switch key.Curve { - case "nistp256": - curve = elliptic.P256() - case "nistp384": - curve = elliptic.P384() - case "nistp521": - curve = elliptic.P521() - default: - return nil, errors.New("ssh: unhandled elliptic curve: " + key.Curve) - } - - X, Y := elliptic.Unmarshal(curve, key.Pub) - if X == nil || Y == nil { - return nil, errors.New("ssh: failed to unmarshal public key") - } - - if key.D.Cmp(curve.Params().N) >= 0 { - return nil, errors.New("ssh: scalar is out of range") - } - - x, y := curve.ScalarBaseMult(key.D.Bytes()) - if x.Cmp(X) != 0 || y.Cmp(Y) != 0 { - return nil, errors.New("ssh: public key does not match private key") - } - - return &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: curve, - X: X, - Y: Y, - }, - D: key.D, - }, nil - default: - return nil, errors.New("ssh: unhandled key type") - } -} - -func checkOpenSSHKeyPadding(pad []byte) error { - for i, b := range pad { - if int(b) != i+1 { - return errors.New("ssh: padding not as expected") - } - } - return nil -} - -// FingerprintLegacyMD5 returns the user presentation of the key's -// fingerprint as described by RFC 4716 section 4. -func FingerprintLegacyMD5(pubKey PublicKey) string { - md5sum := md5.Sum(pubKey.Marshal()) - hexarray := make([]string, len(md5sum)) - for i, c := range md5sum { - hexarray[i] = hex.EncodeToString([]byte{c}) - } - return strings.Join(hexarray, ":") -} - -// FingerprintSHA256 returns the user presentation of the key's -// fingerprint as unpadded base64 encoded sha256 hash. -// This format was introduced from OpenSSH 6.8. 
-// https://www.openssh.com/txt/release-6.8 -// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) -func FingerprintSHA256(pubKey PublicKey) string { - sha256sum := sha256.Sum256(pubKey.Marshal()) - hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) - return "SHA256:" + hash -} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go deleted file mode 100644 index 06a1b275..00000000 --- a/vendor/golang.org/x/crypto/ssh/mac.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Message authentication support - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "hash" -) - -type macMode struct { - keySize int - etm bool - new func(key []byte) hash.Hash -} - -// truncatingMAC wraps around a hash.Hash and truncates the output digest to -// a given size. 
-type truncatingMAC struct { - length int - hmac hash.Hash -} - -func (t truncatingMAC) Write(data []byte) (int, error) { - return t.hmac.Write(data) -} - -func (t truncatingMAC) Sum(in []byte) []byte { - out := t.hmac.Sum(in) - return out[:len(in)+t.length] -} - -func (t truncatingMAC) Reset() { - t.hmac.Reset() -} - -func (t truncatingMAC) Size() int { - return t.length -} - -func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } - -var macModes = map[string]*macMode{ - "hmac-sha2-512-etm@openssh.com": {64, true, func(key []byte) hash.Hash { - return hmac.New(sha512.New, key) - }}, - "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha2-512": {64, false, func(key []byte) hash.Hash { - return hmac.New(sha512.New, key) - }}, - "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha1": {20, false, func(key []byte) hash.Hash { - return hmac.New(sha1.New, key) - }}, - "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { - return truncatingMAC{12, hmac.New(sha1.New, key)} - }}, -} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go deleted file mode 100644 index 922032d9..00000000 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ /dev/null @@ -1,877 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "reflect" - "strconv" - "strings" -) - -// These are SSH message type numbers. They are scattered around several -// documents but many were taken from [SSH-PARAMETERS]. -const ( - msgIgnore = 2 - msgUnimplemented = 3 - msgDebug = 4 - msgNewKeys = 21 -) - -// SSH messages: -// -// These structures mirror the wire format of the corresponding SSH messages. 
-// They are marshaled using reflection with the marshal and unmarshal functions -// in this file. The only wrinkle is that a final member of type []byte with a -// ssh tag of "rest" receives the remainder of a packet when unmarshaling. - -// See RFC 4253, section 11.1. -const msgDisconnect = 1 - -// disconnectMsg is the message that signals a disconnect. It is also -// the error type returned from mux.Wait() -type disconnectMsg struct { - Reason uint32 `sshtype:"1"` - Message string - Language string -} - -func (d *disconnectMsg) Error() string { - return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) -} - -// See RFC 4253, section 7.1. -const msgKexInit = 20 - -type kexInitMsg struct { - Cookie [16]byte `sshtype:"20"` - KexAlgos []string - ServerHostKeyAlgos []string - CiphersClientServer []string - CiphersServerClient []string - MACsClientServer []string - MACsServerClient []string - CompressionClientServer []string - CompressionServerClient []string - LanguagesClientServer []string - LanguagesServerClient []string - FirstKexFollows bool - Reserved uint32 -} - -// See RFC 4253, section 8. - -// Diffie-Hellman -const msgKexDHInit = 30 - -type kexDHInitMsg struct { - X *big.Int `sshtype:"30"` -} - -const msgKexECDHInit = 30 - -type kexECDHInitMsg struct { - ClientPubKey []byte `sshtype:"30"` -} - -const msgKexECDHReply = 31 - -type kexECDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - EphemeralPubKey []byte - Signature []byte -} - -const msgKexDHReply = 31 - -type kexDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - Y *big.Int - Signature []byte -} - -// See RFC 4419, section 5. 
-const msgKexDHGexGroup = 31 - -type kexDHGexGroupMsg struct { - P *big.Int `sshtype:"31"` - G *big.Int -} - -const msgKexDHGexInit = 32 - -type kexDHGexInitMsg struct { - X *big.Int `sshtype:"32"` -} - -const msgKexDHGexReply = 33 - -type kexDHGexReplyMsg struct { - HostKey []byte `sshtype:"33"` - Y *big.Int - Signature []byte -} - -const msgKexDHGexRequest = 34 - -type kexDHGexRequestMsg struct { - MinBits uint32 `sshtype:"34"` - PreferedBits uint32 - MaxBits uint32 -} - -// See RFC 4253, section 10. -const msgServiceRequest = 5 - -type serviceRequestMsg struct { - Service string `sshtype:"5"` -} - -// See RFC 4253, section 10. -const msgServiceAccept = 6 - -type serviceAcceptMsg struct { - Service string `sshtype:"6"` -} - -// See RFC 8308, section 2.3 -const msgExtInfo = 7 - -type extInfoMsg struct { - NumExtensions uint32 `sshtype:"7"` - Payload []byte `ssh:"rest"` -} - -// See RFC 4252, section 5. -const msgUserAuthRequest = 50 - -type userAuthRequestMsg struct { - User string `sshtype:"50"` - Service string - Method string - Payload []byte `ssh:"rest"` -} - -// Used for debug printouts of packets. -type userAuthSuccessMsg struct { -} - -// See RFC 4252, section 5.1 -const msgUserAuthFailure = 51 - -type userAuthFailureMsg struct { - Methods []string `sshtype:"51"` - PartialSuccess bool -} - -// See RFC 4252, section 5.1 -const msgUserAuthSuccess = 52 - -// See RFC 4252, section 5.4 -const msgUserAuthBanner = 53 - -type userAuthBannerMsg struct { - Message string `sshtype:"53"` - // unused, but required to allow message parsing - Language string -} - -// See RFC 4256, section 3.2 -const msgUserAuthInfoRequest = 60 -const msgUserAuthInfoResponse = 61 - -type userAuthInfoRequestMsg struct { - Name string `sshtype:"60"` - Instruction string - Language string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. 
-const msgChannelOpen = 90 - -type channelOpenMsg struct { - ChanType string `sshtype:"90"` - PeersID uint32 - PeersWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -const msgChannelExtendedData = 95 -const msgChannelData = 94 - -// Used for debug print outs of packets. -type channelDataMsg struct { - PeersID uint32 `sshtype:"94"` - Length uint32 - Rest []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenConfirm = 91 - -type channelOpenConfirmMsg struct { - PeersID uint32 `sshtype:"91"` - MyID uint32 - MyWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenFailure = 92 - -type channelOpenFailureMsg struct { - PeersID uint32 `sshtype:"92"` - Reason RejectionReason - Message string - Language string -} - -const msgChannelRequest = 98 - -type channelRequestMsg struct { - PeersID uint32 `sshtype:"98"` - Request string - WantReply bool - RequestSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.4. -const msgChannelSuccess = 99 - -type channelRequestSuccessMsg struct { - PeersID uint32 `sshtype:"99"` -} - -// See RFC 4254, section 5.4. 
-const msgChannelFailure = 100 - -type channelRequestFailureMsg struct { - PeersID uint32 `sshtype:"100"` -} - -// See RFC 4254, section 5.3 -const msgChannelClose = 97 - -type channelCloseMsg struct { - PeersID uint32 `sshtype:"97"` -} - -// See RFC 4254, section 5.3 -const msgChannelEOF = 96 - -type channelEOFMsg struct { - PeersID uint32 `sshtype:"96"` -} - -// See RFC 4254, section 4 -const msgGlobalRequest = 80 - -type globalRequestMsg struct { - Type string `sshtype:"80"` - WantReply bool - Data []byte `ssh:"rest"` -} - -// See RFC 4254, section 4 -const msgRequestSuccess = 81 - -type globalRequestSuccessMsg struct { - Data []byte `ssh:"rest" sshtype:"81"` -} - -// See RFC 4254, section 4 -const msgRequestFailure = 82 - -type globalRequestFailureMsg struct { - Data []byte `ssh:"rest" sshtype:"82"` -} - -// See RFC 4254, section 5.2 -const msgChannelWindowAdjust = 93 - -type windowAdjustMsg struct { - PeersID uint32 `sshtype:"93"` - AdditionalBytes uint32 -} - -// See RFC 4252, section 7 -const msgUserAuthPubKeyOk = 60 - -type userAuthPubKeyOkMsg struct { - Algo string `sshtype:"60"` - PubKey []byte -} - -// See RFC 4462, section 3 -const msgUserAuthGSSAPIResponse = 60 - -type userAuthGSSAPIResponse struct { - SupportMech []byte `sshtype:"60"` -} - -const msgUserAuthGSSAPIToken = 61 - -type userAuthGSSAPIToken struct { - Token []byte `sshtype:"61"` -} - -const msgUserAuthGSSAPIMIC = 66 - -type userAuthGSSAPIMIC struct { - MIC []byte `sshtype:"66"` -} - -// See RFC 4462, section 3.9 -const msgUserAuthGSSAPIErrTok = 64 - -type userAuthGSSAPIErrTok struct { - ErrorToken []byte `sshtype:"64"` -} - -// See RFC 4462, section 3.8 -const msgUserAuthGSSAPIError = 65 - -type userAuthGSSAPIError struct { - MajorStatus uint32 `sshtype:"65"` - MinorStatus uint32 - Message string - LanguageTag string -} - -// typeTags returns the possible type bytes for the given reflect.Type, which -// should be a struct. The possible values are separated by a '|' character. 
-func typeTags(structType reflect.Type) (tags []byte) { - tagStr := structType.Field(0).Tag.Get("sshtype") - - for _, tag := range strings.Split(tagStr, "|") { - i, err := strconv.Atoi(tag) - if err == nil { - tags = append(tags, byte(i)) - } - } - - return tags -} - -func fieldError(t reflect.Type, field int, problem string) error { - if problem != "" { - problem = ": " + problem - } - return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) -} - -var errShortRead = errors.New("ssh: short read") - -// Unmarshal parses data in SSH wire format into a structure. The out -// argument should be a pointer to struct. If the first member of the -// struct has the "sshtype" tag set to a '|'-separated set of numbers -// in decimal, the packet must start with one of those numbers. In -// case of error, Unmarshal returns a ParseError or -// UnexpectedMessageError. -func Unmarshal(data []byte, out interface{}) error { - v := reflect.ValueOf(out).Elem() - structType := v.Type() - expectedTypes := typeTags(structType) - - var expectedType byte - if len(expectedTypes) > 0 { - expectedType = expectedTypes[0] - } - - if len(data) == 0 { - return parseError(expectedType) - } - - if len(expectedTypes) > 0 { - goodType := false - for _, e := range expectedTypes { - if e > 0 && data[0] == e { - goodType = true - break - } - } - if !goodType { - return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) - } - data = data[1:] - } - - var ok bool - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - t := field.Type() - switch t.Kind() { - case reflect.Bool: - if len(data) < 1 { - return errShortRead - } - field.SetBool(data[0] != 0) - data = data[1:] - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - return fieldError(structType, i, "array of unsupported type") - } - if len(data) < t.Len() { - return errShortRead - } - for j, n := 0, t.Len(); j < n; j++ { - 
field.Index(j).Set(reflect.ValueOf(data[j])) - } - data = data[t.Len():] - case reflect.Uint64: - var u64 uint64 - if u64, data, ok = parseUint64(data); !ok { - return errShortRead - } - field.SetUint(u64) - case reflect.Uint32: - var u32 uint32 - if u32, data, ok = parseUint32(data); !ok { - return errShortRead - } - field.SetUint(uint64(u32)) - case reflect.Uint8: - if len(data) < 1 { - return errShortRead - } - field.SetUint(uint64(data[0])) - data = data[1:] - case reflect.String: - var s []byte - if s, data, ok = parseString(data); !ok { - return fieldError(structType, i, "") - } - field.SetString(string(s)) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if structType.Field(i).Tag.Get("ssh") == "rest" { - field.Set(reflect.ValueOf(data)) - data = nil - } else { - var s []byte - if s, data, ok = parseString(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(s)) - } - case reflect.String: - var nl []string - if nl, data, ok = parseNameList(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(nl)) - default: - return fieldError(structType, i, "slice of unsupported type") - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - if n, data, ok = parseInt(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(n)) - } else { - return fieldError(structType, i, "pointer to unsupported type") - } - default: - return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) - } - } - - if len(data) != 0 { - return parseError(expectedType) - } - - return nil -} - -// Marshal serializes the message in msg to SSH wire format. The msg -// argument should be a struct or pointer to struct. If the first -// member has the "sshtype" tag set to a number in decimal, that -// number is prepended to the result. If the last of member has the -// "ssh" tag set to "rest", its contents are appended to the output. 
-func Marshal(msg interface{}) []byte { - out := make([]byte, 0, 64) - return marshalStruct(out, msg) -} - -func marshalStruct(out []byte, msg interface{}) []byte { - v := reflect.Indirect(reflect.ValueOf(msg)) - msgTypes := typeTags(v.Type()) - if len(msgTypes) > 0 { - out = append(out, msgTypes[0]) - } - - for i, n := 0, v.NumField(); i < n; i++ { - field := v.Field(i) - switch t := field.Type(); t.Kind() { - case reflect.Bool: - var v uint8 - if field.Bool() { - v = 1 - } - out = append(out, v) - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) - } - for j, l := 0, t.Len(); j < l; j++ { - out = append(out, uint8(field.Index(j).Uint())) - } - case reflect.Uint32: - out = appendU32(out, uint32(field.Uint())) - case reflect.Uint64: - out = appendU64(out, uint64(field.Uint())) - case reflect.Uint8: - out = append(out, uint8(field.Uint())) - case reflect.String: - s := field.String() - out = appendInt(out, len(s)) - out = append(out, s...) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if v.Type().Field(i).Tag.Get("ssh") != "rest" { - out = appendInt(out, field.Len()) - } - out = append(out, field.Bytes()...) - case reflect.String: - offset := len(out) - out = appendU32(out, 0) - if n := field.Len(); n > 0 { - for j := 0; j < n; j++ { - f := field.Index(j) - if j != 0 { - out = append(out, ',') - } - out = append(out, f.String()...) 
- } - // overwrite length value - binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) - } - default: - panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - nValue := reflect.ValueOf(&n) - nValue.Elem().Set(field) - needed := intLength(n) - oldLength := len(out) - - if cap(out)-len(out) < needed { - newOut := make([]byte, len(out), 2*(len(out)+needed)) - copy(newOut, out) - out = newOut - } - out = out[:oldLength+needed] - marshalInt(out[oldLength:], n) - } else { - panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) - } - } - } - - return out -} - -var bigOne = big.NewInt(1) - -func parseString(in []byte) (out, rest []byte, ok bool) { - if len(in) < 4 { - return - } - length := binary.BigEndian.Uint32(in) - in = in[4:] - if uint32(len(in)) < length { - return - } - out = in[:length] - rest = in[length:] - ok = true - return -} - -var ( - comma = []byte{','} - emptyNameList = []string{} -) - -func parseNameList(in []byte) (out []string, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - if len(contents) == 0 { - out = emptyNameList - return - } - parts := bytes.Split(contents, comma) - out = make([]string, len(parts)) - for i, part := range parts { - out[i] = string(part) - } - return -} - -func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - out = new(big.Int) - - if len(contents) > 0 && contents[0]&0x80 == 0x80 { - // This is a negative number - notBytes := make([]byte, len(contents)) - for i := range notBytes { - notBytes[i] = ^contents[i] - } - out.SetBytes(notBytes) - out.Add(out, bigOne) - out.Neg(out) - } else { - // Positive number - out.SetBytes(contents) - } - ok = true - return -} - -func parseUint32(in []byte) (uint32, []byte, bool) { - if len(in) < 4 { - return 0, nil, false - } - return 
binary.BigEndian.Uint32(in), in[4:], true -} - -func parseUint64(in []byte) (uint64, []byte, bool) { - if len(in) < 8 { - return 0, nil, false - } - return binary.BigEndian.Uint64(in), in[8:], true -} - -func intLength(n *big.Int) int { - length := 4 /* length bytes */ - if n.Sign() < 0 { - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bitLen := nMinus1.BitLen() - if bitLen%8 == 0 { - // The number will need 0xff padding - length++ - } - length += (bitLen + 7) / 8 - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bitLen := n.BitLen() - if bitLen%8 == 0 { - // The number will need 0x00 padding - length++ - } - length += (bitLen + 7) / 8 - } - - return length -} - -func marshalUint32(to []byte, n uint32) []byte { - binary.BigEndian.PutUint32(to, n) - return to[4:] -} - -func marshalUint64(to []byte, n uint64) []byte { - binary.BigEndian.PutUint64(to, n) - return to[8:] -} - -func marshalInt(to []byte, n *big.Int) []byte { - lengthBytes := to - to = to[4:] - length := 0 - - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement - // form. So we'll subtract 1 and invert. If the - // most-significant-bit isn't set then we'll need to pad the - // beginning with 0xff in order to keep the number negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - to[0] = 0xff - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bytes := n.Bytes() - if len(bytes) > 0 && bytes[0]&0x80 != 0 { - // We'll have to pad this with a 0x00 in order to - // stop it looking like a negative number. 
- to[0] = 0 - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } - - lengthBytes[0] = byte(length >> 24) - lengthBytes[1] = byte(length >> 16) - lengthBytes[2] = byte(length >> 8) - lengthBytes[3] = byte(length) - return to -} - -func writeInt(w io.Writer, n *big.Int) { - length := intLength(n) - buf := make([]byte, length) - marshalInt(buf, n) - w.Write(buf) -} - -func writeString(w io.Writer, s []byte) { - var lengthBytes [4]byte - lengthBytes[0] = byte(len(s) >> 24) - lengthBytes[1] = byte(len(s) >> 16) - lengthBytes[2] = byte(len(s) >> 8) - lengthBytes[3] = byte(len(s)) - w.Write(lengthBytes[:]) - w.Write(s) -} - -func stringLength(n int) int { - return 4 + n -} - -func marshalString(to []byte, s []byte) []byte { - to[0] = byte(len(s) >> 24) - to[1] = byte(len(s) >> 16) - to[2] = byte(len(s) >> 8) - to[3] = byte(len(s)) - to = to[4:] - copy(to, s) - return to[len(s):] -} - -var bigIntType = reflect.TypeOf((*big.Int)(nil)) - -// Decode a packet into its corresponding message. 
-func decode(packet []byte) (interface{}, error) { - var msg interface{} - switch packet[0] { - case msgDisconnect: - msg = new(disconnectMsg) - case msgServiceRequest: - msg = new(serviceRequestMsg) - case msgServiceAccept: - msg = new(serviceAcceptMsg) - case msgExtInfo: - msg = new(extInfoMsg) - case msgKexInit: - msg = new(kexInitMsg) - case msgKexDHInit: - msg = new(kexDHInitMsg) - case msgKexDHReply: - msg = new(kexDHReplyMsg) - case msgUserAuthRequest: - msg = new(userAuthRequestMsg) - case msgUserAuthSuccess: - return new(userAuthSuccessMsg), nil - case msgUserAuthFailure: - msg = new(userAuthFailureMsg) - case msgUserAuthPubKeyOk: - msg = new(userAuthPubKeyOkMsg) - case msgGlobalRequest: - msg = new(globalRequestMsg) - case msgRequestSuccess: - msg = new(globalRequestSuccessMsg) - case msgRequestFailure: - msg = new(globalRequestFailureMsg) - case msgChannelOpen: - msg = new(channelOpenMsg) - case msgChannelData: - msg = new(channelDataMsg) - case msgChannelOpenConfirm: - msg = new(channelOpenConfirmMsg) - case msgChannelOpenFailure: - msg = new(channelOpenFailureMsg) - case msgChannelWindowAdjust: - msg = new(windowAdjustMsg) - case msgChannelEOF: - msg = new(channelEOFMsg) - case msgChannelClose: - msg = new(channelCloseMsg) - case msgChannelRequest: - msg = new(channelRequestMsg) - case msgChannelSuccess: - msg = new(channelRequestSuccessMsg) - case msgChannelFailure: - msg = new(channelRequestFailureMsg) - case msgUserAuthGSSAPIToken: - msg = new(userAuthGSSAPIToken) - case msgUserAuthGSSAPIMIC: - msg = new(userAuthGSSAPIMIC) - case msgUserAuthGSSAPIErrTok: - msg = new(userAuthGSSAPIErrTok) - case msgUserAuthGSSAPIError: - msg = new(userAuthGSSAPIError) - default: - return nil, unexpectedMessageError(0, packet[0]) - } - if err := Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} - -var packetTypeNames = map[byte]string{ - msgDisconnect: "disconnectMsg", - msgServiceRequest: "serviceRequestMsg", - msgServiceAccept: 
"serviceAcceptMsg", - msgExtInfo: "extInfoMsg", - msgKexInit: "kexInitMsg", - msgKexDHInit: "kexDHInitMsg", - msgKexDHReply: "kexDHReplyMsg", - msgUserAuthRequest: "userAuthRequestMsg", - msgUserAuthSuccess: "userAuthSuccessMsg", - msgUserAuthFailure: "userAuthFailureMsg", - msgUserAuthPubKeyOk: "userAuthPubKeyOkMsg", - msgGlobalRequest: "globalRequestMsg", - msgRequestSuccess: "globalRequestSuccessMsg", - msgRequestFailure: "globalRequestFailureMsg", - msgChannelOpen: "channelOpenMsg", - msgChannelData: "channelDataMsg", - msgChannelOpenConfirm: "channelOpenConfirmMsg", - msgChannelOpenFailure: "channelOpenFailureMsg", - msgChannelWindowAdjust: "windowAdjustMsg", - msgChannelEOF: "channelEOFMsg", - msgChannelClose: "channelCloseMsg", - msgChannelRequest: "channelRequestMsg", - msgChannelSuccess: "channelRequestSuccessMsg", - msgChannelFailure: "channelRequestFailureMsg", -} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go deleted file mode 100644 index 9654c018..00000000 --- a/vendor/golang.org/x/crypto/ssh/mux.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "fmt" - "io" - "log" - "sync" - "sync/atomic" -) - -// debugMux, if set, causes messages in the connection protocol to be -// logged. -const debugMux = false - -// chanList is a thread safe channel list. -type chanList struct { - // protects concurrent access to chans - sync.Mutex - - // chans are indexed by the local id of the channel, which the - // other side should send in the PeersId field. - chans []*channel - - // This is a debugging aid: it offsets all IDs by this - // amount. This helps distinguish otherwise identical - // server/client muxes - offset uint32 -} - -// Assigns a channel ID to the given channel. 
-func (c *chanList) add(ch *channel) uint32 { - c.Lock() - defer c.Unlock() - for i := range c.chans { - if c.chans[i] == nil { - c.chans[i] = ch - return uint32(i) + c.offset - } - } - c.chans = append(c.chans, ch) - return uint32(len(c.chans)-1) + c.offset -} - -// getChan returns the channel for the given ID. -func (c *chanList) getChan(id uint32) *channel { - id -= c.offset - - c.Lock() - defer c.Unlock() - if id < uint32(len(c.chans)) { - return c.chans[id] - } - return nil -} - -func (c *chanList) remove(id uint32) { - id -= c.offset - c.Lock() - if id < uint32(len(c.chans)) { - c.chans[id] = nil - } - c.Unlock() -} - -// dropAll forgets all channels it knows, returning them in a slice. -func (c *chanList) dropAll() []*channel { - c.Lock() - defer c.Unlock() - var r []*channel - - for _, ch := range c.chans { - if ch == nil { - continue - } - r = append(r, ch) - } - c.chans = nil - return r -} - -// mux represents the state for the SSH connection protocol, which -// multiplexes many channels onto a single packet transport. -type mux struct { - conn packetConn - chanList chanList - - incomingChannels chan NewChannel - - globalSentMu sync.Mutex - globalResponses chan interface{} - incomingRequests chan *Request - - errCond *sync.Cond - err error -} - -// When debugging, each new chanList instantiation has a different -// offset. -var globalOff uint32 - -func (m *mux) Wait() error { - m.errCond.L.Lock() - defer m.errCond.L.Unlock() - for m.err == nil { - m.errCond.Wait() - } - return m.err -} - -// newMux returns a mux that runs over the given connection. 
-func newMux(p packetConn) *mux { - m := &mux{ - conn: p, - incomingChannels: make(chan NewChannel, chanSize), - globalResponses: make(chan interface{}, 1), - incomingRequests: make(chan *Request, chanSize), - errCond: newCond(), - } - if debugMux { - m.chanList.offset = atomic.AddUint32(&globalOff, 1) - } - - go m.loop() - return m -} - -func (m *mux) sendMessage(msg interface{}) error { - p := Marshal(msg) - if debugMux { - log.Printf("send global(%d): %#v", m.chanList.offset, msg) - } - return m.conn.writePacket(p) -} - -func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { - if wantReply { - m.globalSentMu.Lock() - defer m.globalSentMu.Unlock() - } - - if err := m.sendMessage(globalRequestMsg{ - Type: name, - WantReply: wantReply, - Data: payload, - }); err != nil { - return false, nil, err - } - - if !wantReply { - return false, nil, nil - } - - msg, ok := <-m.globalResponses - if !ok { - return false, nil, io.EOF - } - switch msg := msg.(type) { - case *globalRequestFailureMsg: - return false, msg.Data, nil - case *globalRequestSuccessMsg: - return true, msg.Data, nil - default: - return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) - } -} - -// ackRequest must be called after processing a global request that -// has WantReply set. -func (m *mux) ackRequest(ok bool, data []byte) error { - if ok { - return m.sendMessage(globalRequestSuccessMsg{Data: data}) - } - return m.sendMessage(globalRequestFailureMsg{Data: data}) -} - -func (m *mux) Close() error { - return m.conn.Close() -} - -// loop runs the connection machine. It will process packets until an -// error is encountered. To synchronize on loop exit, use mux.Wait. 
-func (m *mux) loop() { - var err error - for err == nil { - err = m.onePacket() - } - - for _, ch := range m.chanList.dropAll() { - ch.close() - } - - close(m.incomingChannels) - close(m.incomingRequests) - close(m.globalResponses) - - m.conn.Close() - - m.errCond.L.Lock() - m.err = err - m.errCond.Broadcast() - m.errCond.L.Unlock() - - if debugMux { - log.Println("loop exit", err) - } -} - -// onePacket reads and processes one packet. -func (m *mux) onePacket() error { - packet, err := m.conn.readPacket() - if err != nil { - return err - } - - if debugMux { - if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { - log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) - } else { - p, _ := decode(packet) - log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) - } - } - - switch packet[0] { - case msgChannelOpen: - return m.handleChannelOpen(packet) - case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: - return m.handleGlobalPacket(packet) - } - - // assume a channel packet. - if len(packet) < 5 { - return parseError(packet[0]) - } - id := binary.BigEndian.Uint32(packet[1:]) - ch := m.chanList.getChan(id) - if ch == nil { - return m.handleUnknownChannelPacket(id, packet) - } - - return ch.handlePacket(packet) -} - -func (m *mux) handleGlobalPacket(packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - case *globalRequestMsg: - m.incomingRequests <- &Request{ - Type: msg.Type, - WantReply: msg.WantReply, - Payload: msg.Data, - mux: m, - } - case *globalRequestSuccessMsg, *globalRequestFailureMsg: - m.globalResponses <- msg - default: - panic(fmt.Sprintf("not a global message %#v", msg)) - } - - return nil -} - -// handleChannelOpen schedules a channel to be Accept()ed. 
-func (m *mux) handleChannelOpen(packet []byte) error { - var msg channelOpenMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - failMsg := channelOpenFailureMsg{ - PeersID: msg.PeersID, - Reason: ConnectionFailed, - Message: "invalid request", - Language: "en_US.UTF-8", - } - return m.sendMessage(failMsg) - } - - c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) - c.remoteId = msg.PeersID - c.maxRemotePayload = msg.MaxPacketSize - c.remoteWin.add(msg.PeersWindow) - m.incomingChannels <- c - return nil -} - -func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { - ch, err := m.openChannel(chanType, extra) - if err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { - ch := m.newChannel(chanType, channelOutbound, extra) - - ch.maxIncomingPayload = channelMaxPacket - - open := channelOpenMsg{ - ChanType: chanType, - PeersWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - TypeSpecificData: extra, - PeersID: ch.localId, - } - if err := m.sendMessage(open); err != nil { - return nil, err - } - - switch msg := (<-ch.msg).(type) { - case *channelOpenConfirmMsg: - return ch, nil - case *channelOpenFailureMsg: - return nil, &OpenChannelError{msg.Reason, msg.Message} - default: - return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) - } -} - -func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - // RFC 4254 section 5.4 says unrecognized channel requests should - // receive a failure response. 
- case *channelRequestMsg: - if msg.WantReply { - return m.sendMessage(channelRequestFailureMsg{ - PeersID: msg.PeersID, - }) - } - return nil - default: - return fmt.Errorf("ssh: invalid channel %d", id) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go deleted file mode 100644 index b21322af..00000000 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ /dev/null @@ -1,774 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "strings" -) - -// The Permissions type holds fine-grained permissions that are -// specific to a user or a specific authentication method for a user. -// The Permissions value for a successful authentication attempt is -// available in ServerConn, so it can be used to pass information from -// the user-authentication phase to the application layer. -type Permissions struct { - // CriticalOptions indicate restrictions to the default - // permissions, and are typically used in conjunction with - // user certificates. The standard for SSH certificates - // defines "force-command" (only allow the given command to - // execute) and "source-address" (only allow connections from - // the given address). The SSH package currently only enforces - // the "source-address" critical option. It is up to server - // implementations to enforce other critical options, such as - // "force-command", by checking them after the SSH handshake - // is successful. In general, SSH servers should reject - // connections that specify critical options that are unknown - // or not supported. - CriticalOptions map[string]string - - // Extensions are extra functionality that the server may - // offer on authenticated connections. Lack of support for an - // extension does not preclude authenticating a user. 
Common - // extensions are "permit-agent-forwarding", - // "permit-X11-forwarding". The Go SSH library currently does - // not act on any extension, and it is up to server - // implementations to honor them. Extensions can be used to - // pass data from the authentication callbacks to the server - // application layer. - Extensions map[string]string -} - -type GSSAPIWithMICConfig struct { - // AllowLogin, must be set, is called when gssapi-with-mic - // authentication is selected (RFC 4462 section 3). The srcName is from the - // results of the GSS-API authentication. The format is username@DOMAIN. - // GSSAPI just guarantees to the server who the user is, but not if they can log in, and with what permissions. - // This callback is called after the user identity is established with GSSAPI to decide if the user can login with - // which permissions. If the user is allowed to login, it should return a nil error. - AllowLogin func(conn ConnMetadata, srcName string) (*Permissions, error) - - // Server must be set. It's the implementation - // of the GSSAPIServer interface. See GSSAPIServer interface for details. - Server GSSAPIServer -} - -// ServerConfig holds server specific configuration data. -type ServerConfig struct { - // Config contains configuration shared between client and server. - Config - - hostKeys []Signer - - // NoClientAuth is true if clients are allowed to connect without - // authenticating. - // To determine NoClientAuth at runtime, set NoClientAuth to true - // and the optional NoClientAuthCallback to a non-nil value. - NoClientAuth bool - - // NoClientAuthCallback, if non-nil, is called when a user - // attempts to authenticate with auth method "none". - // NoClientAuth must also be set to true for this be used, or - // this func is unused. - NoClientAuthCallback func(ConnMetadata) (*Permissions, error) - - // MaxAuthTries specifies the maximum number of authentication attempts - // permitted per connection. 
If set to a negative number, the number of - // attempts are unlimited. If set to zero, the number of attempts are limited - // to 6. - MaxAuthTries int - - // PasswordCallback, if non-nil, is called when a user - // attempts to authenticate using a password. - PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) - - // PublicKeyCallback, if non-nil, is called when a client - // offers a public key for authentication. It must return a nil error - // if the given public key can be used to authenticate the - // given user. For example, see CertChecker.Authenticate. A - // call to this function does not guarantee that the key - // offered is in fact used to authenticate. To record any data - // depending on the public key, store it inside a - // Permissions.Extensions entry. - PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // KeyboardInteractiveCallback, if non-nil, is called when - // keyboard-interactive authentication is selected (RFC - // 4256). The client object's Challenge function should be - // used to query the user. The callback may offer multiple - // Challenge rounds. To avoid information leaks, the client - // should be presented a challenge even if the user is - // unknown. - KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) - - // AuthLogCallback, if non-nil, is called to log all authentication - // attempts. - AuthLogCallback func(conn ConnMetadata, method string, err error) - - // ServerVersion is the version identification string to announce in - // the public handshake. - // If empty, a reasonable default is used. - // Note that RFC 4253 section 4.2 requires that this string start with - // "SSH-2.0-". - ServerVersion string - - // BannerCallback, if present, is called and the return string is sent to - // the client after key exchange completed but before authentication. 
- BannerCallback func(conn ConnMetadata) string - - // GSSAPIWithMICConfig includes gssapi server and callback, which if both non-nil, is used - // when gssapi-with-mic authentication is selected (RFC 4462 section 3). - GSSAPIWithMICConfig *GSSAPIWithMICConfig -} - -// AddHostKey adds a private key as a host key. If an existing host -// key exists with the same public key format, it is replaced. Each server -// config must have at least one host key. -func (s *ServerConfig) AddHostKey(key Signer) { - for i, k := range s.hostKeys { - if k.PublicKey().Type() == key.PublicKey().Type() { - s.hostKeys[i] = key - return - } - } - - s.hostKeys = append(s.hostKeys, key) -} - -// cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. -type cachedPubKey struct { - user string - pubKeyData []byte - result error - perms *Permissions -} - -const maxCachedPubKeys = 16 - -// pubKeyCache caches tests for public keys. Since SSH clients -// will query whether a public key is acceptable before attempting to -// authenticate with it, we end up with duplicate queries for public -// key validity. The cache only applies to a single ServerConn. -type pubKeyCache struct { - keys []cachedPubKey -} - -// get returns the result for a given user/algo/key tuple. -func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { - for _, k := range c.keys { - if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { - return k, true - } - } - return cachedPubKey{}, false -} - -// add adds the given tuple to the cache. -func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) - } -} - -// ServerConn is an authenticated SSH connection, as seen from the -// server -type ServerConn struct { - Conn - - // If the succeeding authentication callback returned a - // non-nil Permissions pointer, it is stored here. 
- Permissions *Permissions -} - -// NewServerConn starts a new SSH server with c as the underlying -// transport. It starts with a handshake and, if the handshake is -// unsuccessful, it closes the connection and returns an error. The -// Request and NewChannel channels must be serviced, or the connection -// will hang. -// -// The returned error may be of type *ServerAuthError for -// authentication errors. -func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.MaxAuthTries == 0 { - fullConf.MaxAuthTries = 6 - } - // Check if the config contains any unsupported key exchanges - for _, kex := range fullConf.KeyExchanges { - if _, ok := serverForbiddenKexAlgos[kex]; ok { - return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) - } - } - - s := &connection{ - sshConn: sshConn{conn: c}, - } - perms, err := s.serverHandshake(&fullConf) - if err != nil { - c.Close() - return nil, nil, nil, err - } - return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil -} - -// signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. algo is the negotiate -// algorithm and may be a certificate type. -func signAndMarshal(k AlgorithmSigner, rand io.Reader, data []byte, algo string) ([]byte, error) { - sig, err := k.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) - if err != nil { - return nil, err - } - - return Marshal(sig), nil -} - -// handshake performs key exchange and user authentication. 
-func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { - if len(config.hostKeys) == 0 { - return nil, errors.New("ssh: server has no host keys") - } - - if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && - config.KeyboardInteractiveCallback == nil && (config.GSSAPIWithMICConfig == nil || - config.GSSAPIWithMICConfig.AllowLogin == nil || config.GSSAPIWithMICConfig.Server == nil) { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if config.ServerVersion != "" { - s.serverVersion = []byte(config.ServerVersion) - } else { - s.serverVersion = []byte(packageVersion) - } - var err error - s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) - if err != nil { - return nil, err - } - - tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) - s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) - - if err := s.transport.waitSession(); err != nil { - return nil, err - } - - // We just did the key change, so the session ID is established. 
- s.sessionID = s.transport.getSessionID() - - var packet []byte - if packet, err = s.transport.readPacket(); err != nil { - return nil, err - } - - var serviceRequest serviceRequestMsg - if err = Unmarshal(packet, &serviceRequest); err != nil { - return nil, err - } - if serviceRequest.Service != serviceUserAuth { - return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") - } - serviceAccept := serviceAcceptMsg{ - Service: serviceUserAuth, - } - if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { - return nil, err - } - - perms, err := s.serverAuthenticate(config) - if err != nil { - return nil, err - } - s.mux = newMux(s.transport) - return perms, err -} - -func checkSourceAddress(addr net.Addr, sourceAddrs string) error { - if addr == nil { - return errors.New("ssh: no address known for client, but source-address match required") - } - - tcpAddr, ok := addr.(*net.TCPAddr) - if !ok { - return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) - } - - for _, sourceAddr := range strings.Split(sourceAddrs, ",") { - if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { - if allowedIP.Equal(tcpAddr.IP) { - return nil - } - } else { - _, ipNet, err := net.ParseCIDR(sourceAddr) - if err != nil { - return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) - } - - if ipNet.Contains(tcpAddr.IP) { - return nil - } - } - } - - return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) -} - -func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *connection, - sessionID []byte, userAuthReq userAuthRequestMsg) (authErr error, perms *Permissions, err error) { - gssAPIServer := gssapiConfig.Server - defer gssAPIServer.DeleteSecContext() - var srcName string - for { - var ( - outToken []byte - needContinue bool - ) - outToken, srcName, needContinue, err = 
gssAPIServer.AcceptSecContext(firstToken) - if err != nil { - return err, nil, nil - } - if len(outToken) != 0 { - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIToken{ - Token: outToken, - })); err != nil { - return nil, nil, err - } - } - if !needContinue { - break - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, nil, err - } - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPIMICReq := &userAuthGSSAPIMIC{} - if err := Unmarshal(packet, userAuthGSSAPIMICReq); err != nil { - return nil, nil, err - } - mic := buildMIC(string(sessionID), userAuthReq.User, userAuthReq.Service, userAuthReq.Method) - if err := gssAPIServer.VerifyMIC(mic, userAuthGSSAPIMICReq.MIC); err != nil { - return err, nil, nil - } - perms, authErr = gssapiConfig.AllowLogin(s, srcName) - return authErr, perms, nil -} - -// isAlgoCompatible checks if the signature format is compatible with the -// selected algorithm taking into account edge cases that occur with old -// clients. -func isAlgoCompatible(algo, sigFormat string) bool { - // Compatibility for old clients. - // - // For certificate authentication with OpenSSH 7.2-7.7 signature format can - // be rsa-sha2-256 or rsa-sha2-512 for the algorithm - // ssh-rsa-cert-v01@openssh.com. - // - // With gpg-agent < 2.2.6 the algorithm can be rsa-sha2-256 or rsa-sha2-512 - // for signature format ssh-rsa. - if isRSA(algo) && isRSA(sigFormat) { - return true - } - // Standard case: the underlying algorithm must match the signature format. - return underlyingAlgo(algo) == sigFormat -} - -// ServerAuthError represents server authentication errors and is -// sometimes returned by NewServerConn. 
It appends any authentication -// errors that may occur, and is returned if all of the authentication -// methods provided by the user failed to authenticate. -type ServerAuthError struct { - // Errors contains authentication errors returned by the authentication - // callback methods. The first entry is typically ErrNoAuth. - Errors []error -} - -func (l ServerAuthError) Error() string { - var errs []string - for _, err := range l.Errors { - errs = append(errs, err.Error()) - } - return "[" + strings.Join(errs, ", ") + "]" -} - -// ErrNoAuth is the error value returned if no -// authentication method has been passed yet. This happens as a normal -// part of the authentication loop, since the client first tries -// 'none' authentication to discover available methods. -// It is returned in ServerAuthError.Errors from NewServerConn. -var ErrNoAuth = errors.New("ssh: no auth passed yet") - -func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { - sessionID := s.transport.getSessionID() - var cache pubKeyCache - var perms *Permissions - - authFailures := 0 - var authErrs []error - var displayedBanner bool - -userAuthLoop: - for { - if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { - discMsg := &disconnectMsg{ - Reason: 2, - Message: "too many authentication failures", - } - - if err := s.transport.writePacket(Marshal(discMsg)); err != nil { - return nil, err - } - - return nil, discMsg - } - - var userAuthReq userAuthRequestMsg - if packet, err := s.transport.readPacket(); err != nil { - if err == io.EOF { - return nil, &ServerAuthError{Errors: authErrs} - } - return nil, err - } else if err = Unmarshal(packet, &userAuthReq); err != nil { - return nil, err - } - - if userAuthReq.Service != serviceSSH { - return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) - } - - s.user = userAuthReq.User - - if !displayedBanner && config.BannerCallback != nil { - displayedBanner = 
true - msg := config.BannerCallback(s) - if msg != "" { - bannerMsg := &userAuthBannerMsg{ - Message: msg, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { - return nil, err - } - } - } - - perms = nil - authErr := ErrNoAuth - - switch userAuthReq.Method { - case "none": - if config.NoClientAuth { - if config.NoClientAuthCallback != nil { - perms, authErr = config.NoClientAuthCallback(s) - } else { - authErr = nil - } - } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } - case "password": - if config.PasswordCallback == nil { - authErr = errors.New("ssh: password auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 || payload[0] != 0 { - return nil, parseError(msgUserAuthRequest) - } - payload = payload[1:] - password, payload, ok := parseString(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - perms, authErr = config.PasswordCallback(s, password) - case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { - authErr = errors.New("ssh: keyboard-interactive auth not configured") - break - } - - prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) - case "publickey": - if config.PublicKeyCallback == nil { - authErr = errors.New("ssh: publickey auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 { - return nil, parseError(msgUserAuthRequest) - } - isQuery := payload[0] == 0 - payload = payload[1:] - algoBytes, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - algo := string(algoBytes) - if !contains(supportedPubKeyAuthAlgos, underlyingAlgo(algo)) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) - break - } - - pubKeyData, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - - pubKey, 
err := ParsePublicKey(pubKeyData) - if err != nil { - return nil, err - } - - candidate, ok := cache.get(s.user, pubKeyData) - if !ok { - candidate.user = s.user - candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( - s.RemoteAddr(), - candidate.perms.CriticalOptions[sourceAddressCriticalOption]) - } - cache.add(candidate) - } - - if isQuery { - // The client can query if the given public key - // would be okay. - - if len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - if candidate.result == nil { - okMsg := userAuthPubKeyOkMsg{ - Algo: algo, - PubKey: pubKeyData, - } - if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { - return nil, err - } - continue userAuthLoop - } - authErr = candidate.result - } else { - sig, payload, ok := parseSignature(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - // Ensure the public key algo and signature algo - // are supported. Compare the private key - // algorithm name that corresponds to algo with - // sig.Format. This is usually the same, but - // for certs, the names differ. 
- if !contains(supportedPubKeyAuthAlgos, sig.Format) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) - break - } - if !isAlgoCompatible(algo, sig.Format) { - authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo) - break - } - - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algo, pubKeyData) - - if err := pubKey.Verify(signedData, sig); err != nil { - return nil, err - } - - authErr = candidate.result - perms = candidate.perms - } - case "gssapi-with-mic": - if config.GSSAPIWithMICConfig == nil { - authErr = errors.New("ssh: gssapi-with-mic auth not configured") - break - } - gssapiConfig := config.GSSAPIWithMICConfig - userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) - if err != nil { - return nil, parseError(msgUserAuthRequest) - } - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication. - if userAuthRequestGSSAPI.N == 0 { - authErr = fmt.Errorf("ssh: Mechanism negotiation is not supported") - break - } - var i uint32 - present := false - for i = 0; i < userAuthRequestGSSAPI.N; i++ { - if userAuthRequestGSSAPI.OIDS[i].Equal(krb5Mesh) { - present = true - break - } - } - if !present { - authErr = fmt.Errorf("ssh: GSSAPI authentication must use the Kerberos V5 mechanism") - break - } - // Initial server response, see RFC 4462 section 3.3. - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIResponse{ - SupportMech: krb5OID, - })); err != nil { - return nil, err - } - // Exchange token, see RFC 4462 section 3.4. 
- packet, err := s.transport.readPacket() - if err != nil { - return nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, err - } - authErr, perms, err = gssExchangeToken(gssapiConfig, userAuthGSSAPITokenReq.Token, s, sessionID, - userAuthReq) - if err != nil { - return nil, err - } - default: - authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) - } - - authErrs = append(authErrs, authErr) - - if config.AuthLogCallback != nil { - config.AuthLogCallback(s, userAuthReq.Method, authErr) - } - - if authErr == nil { - break userAuthLoop - } - - authFailures++ - if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { - // If we have hit the max attempts, don't bother sending the - // final SSH_MSG_USERAUTH_FAILURE message, since there are - // no more authentication methods which can be attempted, - // and this message may cause the client to re-attempt - // authentication while we send the disconnect message. - // Continue, and trigger the disconnect at the start of - // the loop. - // - // The SSH specification is somewhat confusing about this, - // RFC 4252 Section 5.1 requires each authentication failure - // be responded to with a respective SSH_MSG_USERAUTH_FAILURE - // message, but Section 4 says the server should disconnect - // after some number of attempts, but it isn't explicit which - // message should take precedence (i.e. should there be a failure - // message than a disconnect message, or if we are going to - // disconnect, should we only send that message.) - // - // Either way, OpenSSH disconnects immediately after the last - // failed authnetication attempt, and given they are typically - // considered the golden implementation it seems reasonable - // to match that behavior. 
- continue - } - - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "password") - } - if config.PublicKeyCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "publickey") - } - if config.KeyboardInteractiveCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") - } - if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && - config.GSSAPIWithMICConfig.AllowLogin != nil { - failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") - } - - if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { - return nil, err - } - } - - if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { - return nil, err - } - return perms, nil -} - -// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by -// asking the client on the other side of a ServerConn. 
-type sshClientKeyboardInteractive struct { - *connection -} - -func (c *sshClientKeyboardInteractive) Challenge(name, instruction string, questions []string, echos []bool) (answers []string, err error) { - if len(questions) != len(echos) { - return nil, errors.New("ssh: echos and questions must have equal length") - } - - var prompts []byte - for i := range questions { - prompts = appendString(prompts, questions[i]) - prompts = appendBool(prompts, echos[i]) - } - - if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ - Name: name, - Instruction: instruction, - NumPrompts: uint32(len(questions)), - Prompts: prompts, - })); err != nil { - return nil, err - } - - packet, err := c.transport.readPacket() - if err != nil { - return nil, err - } - if packet[0] != msgUserAuthInfoResponse { - return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) - } - packet = packet[1:] - - n, packet, ok := parseUint32(packet) - if !ok || int(n) != len(questions) { - return nil, parseError(msgUserAuthInfoResponse) - } - - for i := uint32(0); i < n; i++ { - ans, rest, ok := parseString(packet) - if !ok { - return nil, parseError(msgUserAuthInfoResponse) - } - - answers = append(answers, string(ans)) - packet = rest - } - if len(packet) != 0 { - return nil, errors.New("ssh: junk at end of message") - } - - return answers, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go deleted file mode 100644 index acef6225..00000000 --- a/vendor/golang.org/x/crypto/ssh/session.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Session implements an interactive session described in -// "RFC 4254, section 6". 
- -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "sync" -) - -type Signal string - -// POSIX signals as listed in RFC 4254 Section 6.10. -const ( - SIGABRT Signal = "ABRT" - SIGALRM Signal = "ALRM" - SIGFPE Signal = "FPE" - SIGHUP Signal = "HUP" - SIGILL Signal = "ILL" - SIGINT Signal = "INT" - SIGKILL Signal = "KILL" - SIGPIPE Signal = "PIPE" - SIGQUIT Signal = "QUIT" - SIGSEGV Signal = "SEGV" - SIGTERM Signal = "TERM" - SIGUSR1 Signal = "USR1" - SIGUSR2 Signal = "USR2" -) - -var signals = map[Signal]int{ - SIGABRT: 6, - SIGALRM: 14, - SIGFPE: 8, - SIGHUP: 1, - SIGILL: 4, - SIGINT: 2, - SIGKILL: 9, - SIGPIPE: 13, - SIGQUIT: 3, - SIGSEGV: 11, - SIGTERM: 15, -} - -type TerminalModes map[uint8]uint32 - -// POSIX terminal mode flags as listed in RFC 4254 Section 8. -const ( - tty_OP_END = 0 - VINTR = 1 - VQUIT = 2 - VERASE = 3 - VKILL = 4 - VEOF = 5 - VEOL = 6 - VEOL2 = 7 - VSTART = 8 - VSTOP = 9 - VSUSP = 10 - VDSUSP = 11 - VREPRINT = 12 - VWERASE = 13 - VLNEXT = 14 - VFLUSH = 15 - VSWTCH = 16 - VSTATUS = 17 - VDISCARD = 18 - IGNPAR = 30 - PARMRK = 31 - INPCK = 32 - ISTRIP = 33 - INLCR = 34 - IGNCR = 35 - ICRNL = 36 - IUCLC = 37 - IXON = 38 - IXANY = 39 - IXOFF = 40 - IMAXBEL = 41 - IUTF8 = 42 // RFC 8160 - ISIG = 50 - ICANON = 51 - XCASE = 52 - ECHO = 53 - ECHOE = 54 - ECHOK = 55 - ECHONL = 56 - NOFLSH = 57 - TOSTOP = 58 - IEXTEN = 59 - ECHOCTL = 60 - ECHOKE = 61 - PENDIN = 62 - OPOST = 70 - OLCUC = 71 - ONLCR = 72 - OCRNL = 73 - ONOCR = 74 - ONLRET = 75 - CS7 = 90 - CS8 = 91 - PARENB = 92 - PARODD = 93 - TTY_OP_ISPEED = 128 - TTY_OP_OSPEED = 129 -) - -// A Session represents a connection to a remote command or shell. -type Session struct { - // Stdin specifies the remote process's standard input. - // If Stdin is nil, the remote process reads from an empty - // bytes.Buffer. - Stdin io.Reader - - // Stdout and Stderr specify the remote process's standard - // output and error. 
- // - // If either is nil, Run connects the corresponding file - // descriptor to an instance of io.Discard. There is a - // fixed amount of buffering that is shared for the two streams. - // If either blocks it may eventually cause the remote - // command to block. - Stdout io.Writer - Stderr io.Writer - - ch Channel // the channel backing this session - started bool // true once Start, Run or Shell is invoked. - copyFuncs []func() error - errors chan error // one send per copyFunc - - // true if pipe method is active - stdinpipe, stdoutpipe, stderrpipe bool - - // stdinPipeWriter is non-nil if StdinPipe has not been called - // and Stdin was specified by the user; it is the write end of - // a pipe connecting Session.Stdin to the stdin channel. - stdinPipeWriter io.WriteCloser - - exitStatus chan error -} - -// SendRequest sends an out-of-band channel request on the SSH channel -// underlying the session. -func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - return s.ch.SendRequest(name, wantReply, payload) -} - -func (s *Session) Close() error { - return s.ch.Close() -} - -// RFC 4254 Section 6.4. -type setenvRequest struct { - Name string - Value string -} - -// Setenv sets an environment variable that will be applied to any -// command executed by Shell or Run. -func (s *Session) Setenv(name, value string) error { - msg := setenvRequest{ - Name: name, - Value: value, - } - ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: setenv failed") - } - return err -} - -// RFC 4254 Section 6.2. -type ptyRequestMsg struct { - Term string - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 - Modelist string -} - -// RequestPty requests the association of a pty with the session on the remote host. 
-func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { - var tm []byte - for k, v := range termmodes { - kv := struct { - Key byte - Val uint32 - }{k, v} - - tm = append(tm, Marshal(&kv)...) - } - tm = append(tm, tty_OP_END) - req := ptyRequestMsg{ - Term: term, - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - Modelist: string(tm), - } - ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) - if err == nil && !ok { - err = errors.New("ssh: pty-req failed") - } - return err -} - -// RFC 4254 Section 6.5. -type subsystemRequestMsg struct { - Subsystem string -} - -// RequestSubsystem requests the association of a subsystem with the session on the remote host. -// A subsystem is a predefined command that runs in the background when the ssh session is initiated -func (s *Session) RequestSubsystem(subsystem string) error { - msg := subsystemRequestMsg{ - Subsystem: subsystem, - } - ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: subsystem request failed") - } - return err -} - -// RFC 4254 Section 6.7. -type ptyWindowChangeMsg struct { - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 -} - -// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. -func (s *Session) WindowChange(h, w int) error { - req := ptyWindowChangeMsg{ - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - } - _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) - return err -} - -// RFC 4254 Section 6.9. -type signalMsg struct { - Signal string -} - -// Signal sends the given signal to the remote process. -// sig is one of the SIG* constants. -func (s *Session) Signal(sig Signal) error { - msg := signalMsg{ - Signal: string(sig), - } - - _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) - return err -} - -// RFC 4254 Section 6.5. 
-type execMsg struct { - Command string -} - -// Start runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start or Shell. -func (s *Session) Start(cmd string) error { - if s.started { - return errors.New("ssh: session already started") - } - req := execMsg{ - Command: cmd, - } - - ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) - if err == nil && !ok { - err = fmt.Errorf("ssh: command %v failed", cmd) - } - if err != nil { - return err - } - return s.start() -} - -// Run runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start, Shell, Output, -// or CombinedOutput. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Run(cmd string) error { - err := s.Start(cmd) - if err != nil { - return err - } - return s.Wait() -} - -// Output runs cmd on the remote host and returns its standard output. -func (s *Session) Output(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - var b bytes.Buffer - s.Stdout = &b - err := s.Run(cmd) - return b.Bytes(), err -} - -type singleWriter struct { - b bytes.Buffer - mu sync.Mutex -} - -func (w *singleWriter) Write(p []byte) (int, error) { - w.mu.Lock() - defer w.mu.Unlock() - return w.b.Write(p) -} - -// CombinedOutput runs cmd on the remote host and returns its combined -// standard output and standard error. 
-func (s *Session) CombinedOutput(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - var b singleWriter - s.Stdout = &b - s.Stderr = &b - err := s.Run(cmd) - return b.b.Bytes(), err -} - -// Shell starts a login shell on the remote host. A Session only -// accepts one call to Run, Start, Shell, Output, or CombinedOutput. -func (s *Session) Shell() error { - if s.started { - return errors.New("ssh: session already started") - } - - ok, err := s.ch.SendRequest("shell", true, nil) - if err == nil && !ok { - return errors.New("ssh: could not start shell") - } - if err != nil { - return err - } - return s.start() -} - -func (s *Session) start() error { - s.started = true - - type F func(*Session) - for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { - setupFd(s) - } - - s.errors = make(chan error, len(s.copyFuncs)) - for _, fn := range s.copyFuncs { - go func(fn func() error) { - s.errors <- fn() - }(fn) - } - return nil -} - -// Wait waits for the remote command to exit. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. 
-func (s *Session) Wait() error { - if !s.started { - return errors.New("ssh: session not started") - } - waitErr := <-s.exitStatus - - if s.stdinPipeWriter != nil { - s.stdinPipeWriter.Close() - } - var copyError error - for range s.copyFuncs { - if err := <-s.errors; err != nil && copyError == nil { - copyError = err - } - } - if waitErr != nil { - return waitErr - } - return copyError -} - -func (s *Session) wait(reqs <-chan *Request) error { - wm := Waitmsg{status: -1} - // Wait for msg channel to be closed before returning. - for msg := range reqs { - switch msg.Type { - case "exit-status": - wm.status = int(binary.BigEndian.Uint32(msg.Payload)) - case "exit-signal": - var sigval struct { - Signal string - CoreDumped bool - Error string - Lang string - } - if err := Unmarshal(msg.Payload, &sigval); err != nil { - return err - } - - // Must sanitize strings? - wm.signal = sigval.Signal - wm.msg = sigval.Error - wm.lang = sigval.Lang - default: - // This handles keepalives and matches - // OpenSSH's behaviour. - if msg.WantReply { - msg.Reply(false, nil) - } - } - } - if wm.status == 0 { - return nil - } - if wm.status == -1 { - // exit-status was never sent from server - if wm.signal == "" { - // signal was not sent either. RFC 4254 - // section 6.10 recommends against this - // behavior, but it is allowed, so we let - // clients handle it. - return &ExitMissingError{} - } - wm.status = 128 - if _, ok := signals[Signal(wm.signal)]; ok { - wm.status += signals[Signal(wm.signal)] - } - } - - return &ExitError{wm} -} - -// ExitMissingError is returned if a session is torn down cleanly, but -// the server sends no confirmation of the exit status. 
-type ExitMissingError struct{} - -func (e *ExitMissingError) Error() string { - return "wait: remote command exited without exit status or exit signal" -} - -func (s *Session) stdin() { - if s.stdinpipe { - return - } - var stdin io.Reader - if s.Stdin == nil { - stdin = new(bytes.Buffer) - } else { - r, w := io.Pipe() - go func() { - _, err := io.Copy(w, s.Stdin) - w.CloseWithError(err) - }() - stdin, s.stdinPipeWriter = r, w - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.ch, stdin) - if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { - err = err1 - } - return err - }) -} - -func (s *Session) stdout() { - if s.stdoutpipe { - return - } - if s.Stdout == nil { - s.Stdout = io.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stdout, s.ch) - return err - }) -} - -func (s *Session) stderr() { - if s.stderrpipe { - return - } - if s.Stderr == nil { - s.Stderr = io.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stderr, s.ch.Stderr()) - return err - }) -} - -// sessionStdin reroutes Close to CloseWrite. -type sessionStdin struct { - io.Writer - ch Channel -} - -func (s *sessionStdin) Close() error { - return s.ch.CloseWrite() -} - -// StdinPipe returns a pipe that will be connected to the -// remote command's standard input when the command starts. -func (s *Session) StdinPipe() (io.WriteCloser, error) { - if s.Stdin != nil { - return nil, errors.New("ssh: Stdin already set") - } - if s.started { - return nil, errors.New("ssh: StdinPipe after process started") - } - s.stdinpipe = true - return &sessionStdin{s.ch, s.ch}, nil -} - -// StdoutPipe returns a pipe that will be connected to the -// remote command's standard output when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StdoutPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. 
-func (s *Session) StdoutPipe() (io.Reader, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.started { - return nil, errors.New("ssh: StdoutPipe after process started") - } - s.stdoutpipe = true - return s.ch, nil -} - -// StderrPipe returns a pipe that will be connected to the -// remote command's standard error when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StderrPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StderrPipe() (io.Reader, error) { - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - if s.started { - return nil, errors.New("ssh: StderrPipe after process started") - } - s.stderrpipe = true - return s.ch.Stderr(), nil -} - -// newSession returns a new interactive session on the remote host. -func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { - s := &Session{ - ch: ch, - } - s.exitStatus = make(chan error, 1) - go func() { - s.exitStatus <- s.wait(reqs) - }() - - return s, nil -} - -// An ExitError reports unsuccessful completion of a remote command. -type ExitError struct { - Waitmsg -} - -func (e *ExitError) Error() string { - return e.Waitmsg.String() -} - -// Waitmsg stores the information about an exited remote command -// as reported by Wait. -type Waitmsg struct { - status int - signal string - msg string - lang string -} - -// ExitStatus returns the exit status of the remote command. -func (w Waitmsg) ExitStatus() int { - return w.status -} - -// Signal returns the exit signal of the remote command if -// it was terminated violently. -func (w Waitmsg) Signal() string { - return w.signal -} - -// Msg returns the exit message given by the remote command -func (w Waitmsg) Msg() string { - return w.msg -} - -// Lang returns the language tag. 
See RFC 3066 -func (w Waitmsg) Lang() string { - return w.lang -} - -func (w Waitmsg) String() string { - str := fmt.Sprintf("Process exited with status %v", w.status) - if w.signal != "" { - str += fmt.Sprintf(" from signal %v", w.signal) - } - if w.msg != "" { - str += fmt.Sprintf(". Reason was: %v", w.msg) - } - return str -} diff --git a/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/vendor/golang.org/x/crypto/ssh/ssh_gss.go deleted file mode 100644 index 24bd7c8e..00000000 --- a/vendor/golang.org/x/crypto/ssh/ssh_gss.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/asn1" - "errors" -) - -var krb5OID []byte - -func init() { - krb5OID, _ = asn1.Marshal(krb5Mesh) -} - -// GSSAPIClient provides the API to plug-in GSSAPI authentication for client logins. -type GSSAPIClient interface { - // InitSecContext initiates the establishment of a security context for GSS-API between the - // ssh client and ssh server. Initially the token parameter should be specified as nil. - // The routine may return a outputToken which should be transferred to - // the ssh server, where the ssh server will present it to - // AcceptSecContext. If no token need be sent, InitSecContext will indicate this by setting - // needContinue to false. To complete the context - // establishment, one or more reply tokens may be required from the ssh - // server;if so, InitSecContext will return a needContinue which is true. - // In this case, InitSecContext should be called again when the - // reply token is received from the ssh server, passing the reply - // token to InitSecContext via the token parameters. - // See RFC 2743 section 2.2.1 and RFC 4462 section 3.4. 
- InitSecContext(target string, token []byte, isGSSDelegCreds bool) (outputToken []byte, needContinue bool, err error) - // GetMIC generates a cryptographic MIC for the SSH2 message, and places - // the MIC in a token for transfer to the ssh server. - // The contents of the MIC field are obtained by calling GSS_GetMIC() - // over the following, using the GSS-API context that was just - // established: - // string session identifier - // byte SSH_MSG_USERAUTH_REQUEST - // string user name - // string service - // string "gssapi-with-mic" - // See RFC 2743 section 2.3.1 and RFC 4462 3.5. - GetMIC(micFiled []byte) ([]byte, error) - // Whenever possible, it should be possible for - // DeleteSecContext() calls to be successfully processed even - // if other calls cannot succeed, thereby enabling context-related - // resources to be released. - // In addition to deleting established security contexts, - // gss_delete_sec_context must also be able to delete "half-built" - // security contexts resulting from an incomplete sequence of - // InitSecContext()/AcceptSecContext() calls. - // See RFC 2743 section 2.2.3. - DeleteSecContext() error -} - -// GSSAPIServer provides the API to plug in GSSAPI authentication for server logins. -type GSSAPIServer interface { - // AcceptSecContext allows a remotely initiated security context between the application - // and a remote peer to be established by the ssh client. The routine may return a - // outputToken which should be transferred to the ssh client, - // where the ssh client will present it to InitSecContext. - // If no token need be sent, AcceptSecContext will indicate this - // by setting the needContinue to false. To - // complete the context establishment, one or more reply tokens may be - // required from the ssh client. 
if so, AcceptSecContext - // will return a needContinue which is true, in which case it - // should be called again when the reply token is received from the ssh - // client, passing the token to AcceptSecContext via the - // token parameters. - // The srcName return value is the authenticated username. - // See RFC 2743 section 2.2.2 and RFC 4462 section 3.4. - AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error) - // VerifyMIC verifies that a cryptographic MIC, contained in the token parameter, - // fits the supplied message is received from the ssh client. - // See RFC 2743 section 2.3.2. - VerifyMIC(micField []byte, micToken []byte) error - // Whenever possible, it should be possible for - // DeleteSecContext() calls to be successfully processed even - // if other calls cannot succeed, thereby enabling context-related - // resources to be released. - // In addition to deleting established security contexts, - // gss_delete_sec_context must also be able to delete "half-built" - // security contexts resulting from an incomplete sequence of - // InitSecContext()/AcceptSecContext() calls. - // See RFC 2743 section 2.2.3. - DeleteSecContext() error -} - -var ( - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication, - // so we also support the krb5 mechanism only. - // See RFC 1964 section 1. - krb5Mesh = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2} -) - -// The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST -// See RFC 4462 section 3.2. 
-type userAuthRequestGSSAPI struct { - N uint32 - OIDS []asn1.ObjectIdentifier -} - -func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { - n, rest, ok := parseUint32(payload) - if !ok { - return nil, errors.New("parse uint32 failed") - } - s := &userAuthRequestGSSAPI{ - N: n, - OIDS: make([]asn1.ObjectIdentifier, n), - } - for i := 0; i < int(n); i++ { - var ( - desiredMech []byte - err error - ) - desiredMech, rest, ok = parseString(rest) - if !ok { - return nil, errors.New("parse string failed") - } - if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { - return nil, err - } - - } - return s, nil -} - -// See RFC 4462 section 3.6. -func buildMIC(sessionID string, username string, service string, authMethod string) []byte { - out := make([]byte, 0, 0) - out = appendString(out, sessionID) - out = append(out, msgUserAuthRequest) - out = appendString(out, username) - out = appendString(out, service) - out = appendString(out, authMethod) - return out -} diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go deleted file mode 100644 index b171b330..00000000 --- a/vendor/golang.org/x/crypto/ssh/streamlocal.go +++ /dev/null @@ -1,116 +0,0 @@ -package ssh - -import ( - "errors" - "io" - "net" -) - -// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "direct-streamlocal@openssh.com" string. -// -// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 -type streamLocalChannelOpenDirectMsg struct { - socketPath string - reserved0 string - reserved1 uint32 -} - -// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "forwarded-streamlocal@openssh.com" string. 
-type forwardedStreamLocalPayload struct { - SocketPath string - Reserved0 string -} - -// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message -// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. -type streamLocalChannelForwardMsg struct { - socketPath string -} - -// ListenUnix is similar to ListenTCP but uses a Unix domain socket. -func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - m := streamLocalChannelForwardMsg{ - socketPath, - } - // send message - ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") - } - ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) - - return &unixListener{socketPath, c, ch}, nil -} - -func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { - msg := streamLocalChannelOpenDirectMsg{ - socketPath: socketPath, - } - ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type unixListener struct { - socketPath string - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *unixListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - }, nil -} - -// Close closes the listener. -func (l *unixListener) Close() error { - // this also closes the listener. 
- l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) - m := streamLocalChannelForwardMsg{ - l.socketPath, - } - ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *unixListener) Addr() net.Addr { - return &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - } -} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go deleted file mode 100644 index 80d35f5e..00000000 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "errors" - "fmt" - "io" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" -) - -// Listen requests the remote peer open a listening socket on -// addr. Incoming connections will be available by calling Accept on -// the returned net.Listener. The listener must be serviced, or the -// SSH connection may hang. -// N must be "tcp", "tcp4", "tcp6", or "unix". -func (c *Client) Listen(n, addr string) (net.Listener, error) { - switch n { - case "tcp", "tcp4", "tcp6": - laddr, err := net.ResolveTCPAddr(n, addr) - if err != nil { - return nil, err - } - return c.ListenTCP(laddr) - case "unix": - return c.ListenUnix(addr) - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// Automatic port allocation is broken with OpenSSH before 6.0. See -// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In -// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, -// rather than the actual port number. This means you can never open -// two different listeners with auto allocated ports. 
We work around -// this by trying explicit ports until we succeed. - -const openSSHPrefix = "OpenSSH_" - -var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) - -// isBrokenOpenSSHVersion returns true if the given version string -// specifies a version of OpenSSH that is known to have a bug in port -// forwarding. -func isBrokenOpenSSHVersion(versionStr string) bool { - i := strings.Index(versionStr, openSSHPrefix) - if i < 0 { - return false - } - i += len(openSSHPrefix) - j := i - for ; j < len(versionStr); j++ { - if versionStr[j] < '0' || versionStr[j] > '9' { - break - } - } - version, _ := strconv.Atoi(versionStr[i:j]) - return version < 6 -} - -// autoPortListenWorkaround simulates automatic port allocation by -// trying random ports repeatedly. -func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { - var sshListener net.Listener - var err error - const tries = 10 - for i := 0; i < tries; i++ { - addr := *laddr - addr.Port = 1024 + portRandomizer.Intn(60000) - sshListener, err = c.ListenTCP(&addr) - if err == nil { - laddr.Port = addr.Port - return sshListener, err - } - } - return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) -} - -// RFC 4254 7.1 -type channelForwardMsg struct { - addr string - rport uint32 -} - -// handleForwards starts goroutines handling forwarded connections. -// It's called on first use by (*Client).ListenTCP to not launch -// goroutines until needed. -func (c *Client) handleForwards() { - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) -} - -// ListenTCP requests the remote peer open a listening socket -// on laddr. Incoming connections will be available by calling -// Accept on the returned net.Listener. 
-func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { - return c.autoPortListenWorkaround(laddr) - } - - m := channelForwardMsg{ - laddr.IP.String(), - uint32(laddr.Port), - } - // send message - ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: tcpip-forward request denied by peer") - } - - // If the original port was 0, then the remote side will - // supply a real port number in the response. - if laddr.Port == 0 { - var p struct { - Port uint32 - } - if err := Unmarshal(resp, &p); err != nil { - return nil, err - } - laddr.Port = int(p.Port) - } - - // Register this forward, using the port number we obtained. - ch := c.forwards.add(laddr) - - return &tcpListener{laddr, c, ch}, nil -} - -// forwardList stores a mapping between remote -// forward requests and the tcpListeners. -type forwardList struct { - sync.Mutex - entries []forwardEntry -} - -// forwardEntry represents an established mapping of a laddr on a -// remote ssh server to a channel connected to a tcpListener. -type forwardEntry struct { - laddr net.Addr - c chan forward -} - -// forward represents an incoming forwarded tcpip connection. The -// arguments to add/remove/lookup should be address as specified in -// the original forward-request. 
-type forward struct { - newCh NewChannel // the ssh client channel underlying this forward - raddr net.Addr // the raddr of the incoming connection -} - -func (l *forwardList) add(addr net.Addr) chan forward { - l.Lock() - defer l.Unlock() - f := forwardEntry{ - laddr: addr, - c: make(chan forward, 1), - } - l.entries = append(l.entries, f) - return f.c -} - -// See RFC 4254, section 7.2 -type forwardedTCPPayload struct { - Addr string - Port uint32 - OriginAddr string - OriginPort uint32 -} - -// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. -func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { - if port == 0 || port > 65535 { - return nil, fmt.Errorf("ssh: port number out of range: %d", port) - } - ip := net.ParseIP(string(addr)) - if ip == nil { - return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) - } - return &net.TCPAddr{IP: ip, Port: int(port)}, nil -} - -func (l *forwardList) handleChannels(in <-chan NewChannel) { - for ch := range in { - var ( - laddr net.Addr - raddr net.Addr - err error - ) - switch channelType := ch.ChannelType(); channelType { - case "forwarded-tcpip": - var payload forwardedTCPPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) - continue - } - - // RFC 4254 section 7.2 specifies that incoming - // addresses should list the address, in string - // format. It is implied that this should be an IP - // address, as it would be impossible to connect to it - // otherwise. 
- laddr, err = parseTCPAddr(payload.Addr, payload.Port) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - - case "forwarded-streamlocal@openssh.com": - var payload forwardedStreamLocalPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) - continue - } - laddr = &net.UnixAddr{ - Name: payload.SocketPath, - Net: "unix", - } - raddr = &net.UnixAddr{ - Name: "@", - Net: "unix", - } - default: - panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) - } - if ok := l.forward(laddr, raddr, ch); !ok { - // Section 7.2, implementations MUST reject spurious incoming - // connections. - ch.Reject(Prohibited, "no forward for address") - continue - } - - } -} - -// remove removes the forward entry, and the channel feeding its -// listener. -func (l *forwardList) remove(addr net.Addr) { - l.Lock() - defer l.Unlock() - for i, f := range l.entries { - if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { - l.entries = append(l.entries[:i], l.entries[i+1:]...) - close(f.c) - return - } - } -} - -// closeAll closes and clears all forwards. -func (l *forwardList) closeAll() { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - close(f.c) - } - l.entries = nil -} - -func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { - f.c <- forward{newCh: ch, raddr: raddr} - return true - } - } - return false -} - -type tcpListener struct { - laddr *net.TCPAddr - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. 
-func (l *tcpListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: l.laddr, - raddr: s.raddr, - }, nil -} - -// Close closes the listener. -func (l *tcpListener) Close() error { - m := channelForwardMsg{ - l.laddr.IP.String(), - uint32(l.laddr.Port), - } - - // this also closes the listener. - l.conn.forwards.remove(l.laddr) - ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-tcpip-forward failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *tcpListener) Addr() net.Addr { - return l.laddr -} - -// Dial initiates a connection to the addr from the remote host. -// The resulting connection has a zero LocalAddr() and RemoteAddr(). -func (c *Client) Dial(n, addr string) (net.Conn, error) { - var ch Channel - switch n { - case "tcp", "tcp4", "tcp6": - // Parse the address into host and numeric port. - host, portString, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.ParseUint(portString, 10, 16) - if err != nil { - return nil, err - } - ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) - if err != nil { - return nil, err - } - // Use a zero address for local and remote address. 
- zeroAddr := &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - return &chanConn{ - Channel: ch, - laddr: zeroAddr, - raddr: zeroAddr, - }, nil - case "unix": - var err error - ch, err = c.dialStreamLocal(addr) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: addr, - Net: "unix", - }, - }, nil - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// DialTCP connects to the remote address raddr on the network net, -// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used -// as the local address for the connection. -func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { - if laddr == nil { - laddr = &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - } - ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: laddr, - raddr: raddr, - }, nil -} - -// RFC 4254 7.2 -type channelOpenDirectMsg struct { - raddr string - rport uint32 - laddr string - lport uint32 -} - -func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { - msg := channelOpenDirectMsg{ - raddr: raddr, - rport: uint32(rport), - laddr: laddr, - lport: uint32(lport), - } - ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type tcpChan struct { - Channel // the backing channel -} - -// chanConn fulfills the net.Conn interface without -// the tcpChan having to hold laddr or raddr directly. -type chanConn struct { - Channel - laddr, raddr net.Addr -} - -// LocalAddr returns the local network address. -func (t *chanConn) LocalAddr() net.Addr { - return t.laddr -} - -// RemoteAddr returns the remote network address. 
-func (t *chanConn) RemoteAddr() net.Addr { - return t.raddr -} - -// SetDeadline sets the read and write deadlines associated -// with the connection. -func (t *chanConn) SetDeadline(deadline time.Time) error { - if err := t.SetReadDeadline(deadline); err != nil { - return err - } - return t.SetWriteDeadline(deadline) -} - -// SetReadDeadline sets the read deadline. -// A zero value for t means Read will not time out. -// After the deadline, the error from Read will implement net.Error -// with Timeout() == true. -func (t *chanConn) SetReadDeadline(deadline time.Time) error { - // for compatibility with previous version, - // the error message contains "tcpChan" - return errors.New("ssh: tcpChan: deadline not supported") -} - -// SetWriteDeadline exists to satisfy the net.Conn interface -// but is not implemented by this type. It always returns an error. -func (t *chanConn) SetWriteDeadline(deadline time.Time) error { - return errors.New("ssh: tcpChan: deadline not supported") -} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go deleted file mode 100644 index a4d1919a..00000000 --- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package terminal provides support functions for dealing with terminals, as -// commonly found on UNIX systems. -// -// Deprecated: this package moved to golang.org/x/term. -package terminal - -import ( - "io" - - "golang.org/x/term" -) - -// EscapeCodes contains escape sequences that can be written to the terminal in -// order to achieve different styles of text. -type EscapeCodes = term.EscapeCodes - -// Terminal contains the state for running a VT100 terminal that is capable of -// reading lines of input. 
-type Terminal = term.Terminal - -// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is -// a local terminal, that terminal must first have been put into raw mode. -// prompt is a string that is written at the start of each input line (i.e. -// "> "). -func NewTerminal(c io.ReadWriter, prompt string) *Terminal { - return term.NewTerminal(c, prompt) -} - -// ErrPasteIndicator may be returned from ReadLine as the error, in addition -// to valid line data. It indicates that bracketed paste mode is enabled and -// that the returned line consists only of pasted data. Programs may wish to -// interpret pasted data more literally than typed data. -var ErrPasteIndicator = term.ErrPasteIndicator - -// State contains the state of a terminal. -type State = term.State - -// IsTerminal returns whether the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - return term.IsTerminal(fd) -} - -// ReadPassword reads a line of input from a terminal without local echo. This -// is commonly used for inputting passwords and other sensitive data. The slice -// returned does not include the \n. -func ReadPassword(fd int) ([]byte, error) { - return term.ReadPassword(fd) -} - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd int) (*State, error) { - return term.MakeRaw(fd) -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. -func Restore(fd int, oldState *State) error { - return term.Restore(fd, oldState) -} - -// GetState returns the current state of a terminal which may be useful to -// restore the terminal after a signal. -func GetState(fd int) (*State, error) { - return term.GetState(fd) -} - -// GetSize returns the dimensions of the given terminal. 
-func GetSize(fd int) (width, height int, err error) { - return term.GetSize(fd) -} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go deleted file mode 100644 index da015801..00000000 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bufio" - "bytes" - "errors" - "io" - "log" -) - -// debugTransport if set, will print packet types as they go over the -// wire. No message decoding is done, to minimize the impact on timing. -const debugTransport = false - -const ( - gcm128CipherID = "aes128-gcm@openssh.com" - gcm256CipherID = "aes256-gcm@openssh.com" - aes128cbcID = "aes128-cbc" - tripledescbcID = "3des-cbc" -) - -// packetConn represents a transport that implements packet based -// operations. -type packetConn interface { - // Encrypt and send a packet of data to the remote peer. - writePacket(packet []byte) error - - // Read a packet from the connection. The read is blocking, - // i.e. if error is nil, then the returned byte slice is - // always non-empty. - readPacket() ([]byte, error) - - // Close closes the write-side of the connection. - Close() error -} - -// transport is the keyingTransport that implements the SSH packet -// protocol. -type transport struct { - reader connectionState - writer connectionState - - bufReader *bufio.Reader - bufWriter *bufio.Writer - rand io.Reader - isClient bool - io.Closer -} - -// packetCipher represents a combination of SSH encryption/MAC -// protocol. A single instance should be used for one direction only. -type packetCipher interface { - // writeCipherPacket encrypts the packet and writes it to w. The - // contents of the packet are generally scrambled. 
- writeCipherPacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error - - // readCipherPacket reads and decrypts a packet of data. The - // returned packet may be overwritten by future calls of - // readPacket. - readCipherPacket(seqnum uint32, r io.Reader) ([]byte, error) -} - -// connectionState represents one side (read or write) of the -// connection. This is necessary because each direction has its own -// keys, and can even have its own algorithms -type connectionState struct { - packetCipher - seqNum uint32 - dir direction - pendingKeyChange chan packetCipher -} - -// prepareKeyChange sets up key material for a keychange. The key changes in -// both directions are triggered by reading and writing a msgNewKey packet -// respectively. -func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { - ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) - if err != nil { - return err - } - t.reader.pendingKeyChange <- ciph - - ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) - if err != nil { - return err - } - t.writer.pendingKeyChange <- ciph - - return nil -} - -func (t *transport) printPacket(p []byte, write bool) { - if len(p) == 0 { - return - } - who := "server" - if t.isClient { - who = "client" - } - what := "read" - if write { - what = "write" - } - - log.Println(what, who, p[0]) -} - -// Read and decrypt next packet. 
-func (t *transport) readPacket() (p []byte, err error) { - for { - p, err = t.reader.readPacket(t.bufReader) - if err != nil { - break - } - if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { - break - } - } - if debugTransport { - t.printPacket(p, false) - } - - return p, err -} - -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { - packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) - s.seqNum++ - if err == nil && len(packet) == 0 { - err = errors.New("ssh: zero length packet") - } - - if len(packet) > 0 { - switch packet[0] { - case msgNewKeys: - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - return nil, errors.New("ssh: got bogus newkeys message") - } - - case msgDisconnect: - // Transform a disconnect message into an - // error. Since this is lowest level at which - // we interpret message types, doing it here - // ensures that we don't have to handle it - // elsewhere. - var msg disconnectMsg - if err := Unmarshal(packet, &msg); err != nil { - return nil, err - } - return nil, &msg - } - } - - // The packet may point to an internal buffer, so copy the - // packet out here. 
- fresh := make([]byte, len(packet)) - copy(fresh, packet) - - return fresh, err -} - -func (t *transport) writePacket(packet []byte) error { - if debugTransport { - t.printPacket(packet, true) - } - return t.writer.writePacket(t.bufWriter, t.rand, packet) -} - -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { - changeKeys := len(packet) > 0 && packet[0] == msgNewKeys - - err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) - if err != nil { - return err - } - if err = w.Flush(); err != nil { - return err - } - s.seqNum++ - if changeKeys { - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - panic("ssh: no key material for msgNewKeys") - } - } - return err -} - -func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { - t := &transport{ - bufReader: bufio.NewReader(rwc), - bufWriter: bufio.NewWriter(rwc), - rand: rand, - reader: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - writer: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - Closer: rwc, - } - t.isClient = isClient - - if isClient { - t.reader.dir = serverKeys - t.writer.dir = clientKeys - } else { - t.reader.dir = clientKeys - t.writer.dir = serverKeys - } - - return t -} - -type direction struct { - ivTag []byte - keyTag []byte - macKeyTag []byte -} - -var ( - serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} - clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} -) - -// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as -// described in RFC 4253, section 6.4. direction should either be serverKeys -// (to setup server->client keys) or clientKeys (for client->server keys). 
-func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { - cipherMode := cipherModes[algs.Cipher] - - iv := make([]byte, cipherMode.ivSize) - key := make([]byte, cipherMode.keySize) - - generateKeyMaterial(iv, d.ivTag, kex) - generateKeyMaterial(key, d.keyTag, kex) - - var macKey []byte - if !aeadCiphers[algs.Cipher] { - macMode := macModes[algs.MAC] - macKey = make([]byte, macMode.keySize) - generateKeyMaterial(macKey, d.macKeyTag, kex) - } - - return cipherModes[algs.Cipher].create(key, iv, macKey, algs) -} - -// generateKeyMaterial fills out with key material generated from tag, K, H -// and sessionId, as specified in RFC 4253, section 7.2. -func generateKeyMaterial(out, tag []byte, r *kexResult) { - var digestsSoFar []byte - - h := r.Hash.New() - for len(out) > 0 { - h.Reset() - h.Write(r.K) - h.Write(r.H) - - if len(digestsSoFar) == 0 { - h.Write(tag) - h.Write(r.SessionID) - } else { - h.Write(digestsSoFar) - } - - digest := h.Sum(nil) - n := copy(out, digest) - out = out[n:] - if len(out) > 0 { - digestsSoFar = append(digestsSoFar, digest...) - } - } -} - -const packageVersion = "SSH-2.0-Go" - -// Sends and receives a version line. The versionLine string should -// be US ASCII, start with "SSH-2.0-", and should not include a -// newline. exchangeVersions returns the other side's version line. -func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { - // Contrary to the RFC, we do not ignore lines that don't - // start with "SSH-2.0-" to make the library usable with - // nonconforming servers. - for _, c := range versionLine { - // The spec disallows non US-ASCII chars, and - // specifically forbids null chars. 
- if c < 32 { - return nil, errors.New("ssh: junk character in version line") - } - } - if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { - return - } - - them, err = readVersion(rw) - return them, err -} - -// maxVersionStringBytes is the maximum number of bytes that we'll -// accept as a version string. RFC 4253 section 4.2 limits this at 255 -// chars -const maxVersionStringBytes = 255 - -// Read version string as specified by RFC 4253, section 4.2. -func readVersion(r io.Reader) ([]byte, error) { - versionString := make([]byte, 0, 64) - var ok bool - var buf [1]byte - - for length := 0; length < maxVersionStringBytes; length++ { - _, err := io.ReadFull(r, buf[:]) - if err != nil { - return nil, err - } - // The RFC says that the version should be terminated with \r\n - // but several SSH servers actually only send a \n. - if buf[0] == '\n' { - if !bytes.HasPrefix(versionString, []byte("SSH-")) { - // RFC 4253 says we need to ignore all version string lines - // except the one containing the SSH version (provided that - // all the lines do not exceed 255 bytes in total). - versionString = versionString[:0] - continue - } - ok = true - break - } - - // non ASCII chars are disallowed, but we are lenient, - // since Go doesn't use null-terminated strings. - - // The RFC allows a comment after a space, however, - // all of it (version and comments) goes into the - // session hash. - versionString = append(versionString, buf[0]) - } - - if !ok { - return nil, errors.New("ssh: overflow reading version string") - } - - // There might be a '\r' on the end which we should remove. 
- if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { - versionString = versionString[:len(versionString)-1] - } - return versionString, nil -} diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go deleted file mode 100644 index cd0a8ac1..00000000 --- a/vendor/golang.org/x/net/html/atom/atom.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package atom provides integer codes (also known as atoms) for a fixed set of -// frequently occurring HTML strings: tag names and attribute keys such as "p" -// and "id". -// -// Sharing an atom's name between all elements with the same tag can result in -// fewer string allocations when tokenizing and parsing HTML. Integer -// comparisons are also generally faster than string comparisons. -// -// The value of an atom's particular code is not guaranteed to stay the same -// between versions of this package. Neither is any ordering guaranteed: -// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to -// be dense. The only guarantees are that e.g. looking up "div" will yield -// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. -package atom // import "golang.org/x/net/html/atom" - -// Atom is an integer code for a string. The zero value maps to "". -type Atom uint32 - -// String returns the atom's name. -func (a Atom) String() string { - start := uint32(a >> 8) - n := uint32(a & 0xff) - if start+n > uint32(len(atomText)) { - return "" - } - return atomText[start : start+n] -} - -func (a Atom) string() string { - return atomText[a>>8 : a>>8+a&0xff] -} - -// fnv computes the FNV hash with an arbitrary starting value h. 
-func fnv(h uint32, s []byte) uint32 { - for i := range s { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -func match(s string, t []byte) bool { - for i, c := range t { - if s[i] != c { - return false - } - } - return true -} - -// Lookup returns the atom whose name is s. It returns zero if there is no -// such atom. The lookup is case sensitive. -func Lookup(s []byte) Atom { - if len(s) == 0 || len(s) > maxAtomLen { - return 0 - } - h := fnv(hash0, s) - if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - return 0 -} - -// String returns a string whose contents are equal to s. In that sense, it is -// equivalent to string(s) but may be more efficient. -func String(s []byte) string { - if a := Lookup(s); a != 0 { - return a.String() - } - return string(s) -} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go deleted file mode 100644 index 2a938864..00000000 --- a/vendor/golang.org/x/net/html/atom/table.go +++ /dev/null @@ -1,783 +0,0 @@ -// Code generated by go generate gen.go; DO NOT EDIT. 
- -//go:generate go run gen.go - -package atom - -const ( - A Atom = 0x1 - Abbr Atom = 0x4 - Accept Atom = 0x1a06 - AcceptCharset Atom = 0x1a0e - Accesskey Atom = 0x2c09 - Acronym Atom = 0xaa07 - Action Atom = 0x27206 - Address Atom = 0x6f307 - Align Atom = 0xb105 - Allowfullscreen Atom = 0x2080f - Allowpaymentrequest Atom = 0xc113 - Allowusermedia Atom = 0xdd0e - Alt Atom = 0xf303 - Annotation Atom = 0x1c90a - AnnotationXml Atom = 0x1c90e - Applet Atom = 0x31906 - Area Atom = 0x35604 - Article Atom = 0x3fc07 - As Atom = 0x3c02 - Aside Atom = 0x10705 - Async Atom = 0xff05 - Audio Atom = 0x11505 - Autocomplete Atom = 0x2780c - Autofocus Atom = 0x12109 - Autoplay Atom = 0x13c08 - B Atom = 0x101 - Base Atom = 0x3b04 - Basefont Atom = 0x3b08 - Bdi Atom = 0xba03 - Bdo Atom = 0x14b03 - Bgsound Atom = 0x15e07 - Big Atom = 0x17003 - Blink Atom = 0x17305 - Blockquote Atom = 0x1870a - Body Atom = 0x2804 - Br Atom = 0x202 - Button Atom = 0x19106 - Canvas Atom = 0x10306 - Caption Atom = 0x23107 - Center Atom = 0x22006 - Challenge Atom = 0x29b09 - Charset Atom = 0x2107 - Checked Atom = 0x47907 - Cite Atom = 0x19c04 - Class Atom = 0x56405 - Code Atom = 0x5c504 - Col Atom = 0x1ab03 - Colgroup Atom = 0x1ab08 - Color Atom = 0x1bf05 - Cols Atom = 0x1c404 - Colspan Atom = 0x1c407 - Command Atom = 0x1d707 - Content Atom = 0x58b07 - Contenteditable Atom = 0x58b0f - Contextmenu Atom = 0x3800b - Controls Atom = 0x1de08 - Coords Atom = 0x1ea06 - Crossorigin Atom = 0x1fb0b - Data Atom = 0x4a504 - Datalist Atom = 0x4a508 - Datetime Atom = 0x2b808 - Dd Atom = 0x2d702 - Default Atom = 0x10a07 - Defer Atom = 0x5c705 - Del Atom = 0x45203 - Desc Atom = 0x56104 - Details Atom = 0x7207 - Dfn Atom = 0x8703 - Dialog Atom = 0xbb06 - Dir Atom = 0x9303 - Dirname Atom = 0x9307 - Disabled Atom = 0x16408 - Div Atom = 0x16b03 - Dl Atom = 0x5e602 - Download Atom = 0x46308 - Draggable Atom = 0x17a09 - Dropzone Atom = 0x40508 - Dt Atom = 0x64b02 - Em Atom = 0x6e02 - Embed Atom = 0x6e05 - Enctype Atom = 
0x28d07 - Face Atom = 0x21e04 - Fieldset Atom = 0x22608 - Figcaption Atom = 0x22e0a - Figure Atom = 0x24806 - Font Atom = 0x3f04 - Footer Atom = 0xf606 - For Atom = 0x25403 - ForeignObject Atom = 0x2540d - Foreignobject Atom = 0x2610d - Form Atom = 0x26e04 - Formaction Atom = 0x26e0a - Formenctype Atom = 0x2890b - Formmethod Atom = 0x2a40a - Formnovalidate Atom = 0x2ae0e - Formtarget Atom = 0x2c00a - Frame Atom = 0x8b05 - Frameset Atom = 0x8b08 - H1 Atom = 0x15c02 - H2 Atom = 0x2de02 - H3 Atom = 0x30d02 - H4 Atom = 0x34502 - H5 Atom = 0x34f02 - H6 Atom = 0x64d02 - Head Atom = 0x33104 - Header Atom = 0x33106 - Headers Atom = 0x33107 - Height Atom = 0x5206 - Hgroup Atom = 0x2ca06 - Hidden Atom = 0x2d506 - High Atom = 0x2db04 - Hr Atom = 0x15702 - Href Atom = 0x2e004 - Hreflang Atom = 0x2e008 - Html Atom = 0x5604 - HttpEquiv Atom = 0x2e80a - I Atom = 0x601 - Icon Atom = 0x58a04 - Id Atom = 0x10902 - Iframe Atom = 0x2fc06 - Image Atom = 0x30205 - Img Atom = 0x30703 - Input Atom = 0x44b05 - Inputmode Atom = 0x44b09 - Ins Atom = 0x20403 - Integrity Atom = 0x23f09 - Is Atom = 0x16502 - Isindex Atom = 0x30f07 - Ismap Atom = 0x31605 - Itemid Atom = 0x38b06 - Itemprop Atom = 0x19d08 - Itemref Atom = 0x3cd07 - Itemscope Atom = 0x67109 - Itemtype Atom = 0x31f08 - Kbd Atom = 0xb903 - Keygen Atom = 0x3206 - Keytype Atom = 0xd607 - Kind Atom = 0x17704 - Label Atom = 0x5905 - Lang Atom = 0x2e404 - Legend Atom = 0x18106 - Li Atom = 0xb202 - Link Atom = 0x17404 - List Atom = 0x4a904 - Listing Atom = 0x4a907 - Loop Atom = 0x5d04 - Low Atom = 0xc303 - Main Atom = 0x1004 - Malignmark Atom = 0xb00a - Manifest Atom = 0x6d708 - Map Atom = 0x31803 - Mark Atom = 0xb604 - Marquee Atom = 0x32707 - Math Atom = 0x32e04 - Max Atom = 0x33d03 - Maxlength Atom = 0x33d09 - Media Atom = 0xe605 - Mediagroup Atom = 0xe60a - Menu Atom = 0x38704 - Menuitem Atom = 0x38708 - Meta Atom = 0x4b804 - Meter Atom = 0x9805 - Method Atom = 0x2a806 - Mglyph Atom = 0x30806 - Mi Atom = 0x34702 - Min Atom = 0x34703 - 
Minlength Atom = 0x34709 - Mn Atom = 0x2b102 - Mo Atom = 0xa402 - Ms Atom = 0x67402 - Mtext Atom = 0x35105 - Multiple Atom = 0x35f08 - Muted Atom = 0x36705 - Name Atom = 0x9604 - Nav Atom = 0x1303 - Nobr Atom = 0x3704 - Noembed Atom = 0x6c07 - Noframes Atom = 0x8908 - Nomodule Atom = 0xa208 - Nonce Atom = 0x1a605 - Noscript Atom = 0x21608 - Novalidate Atom = 0x2b20a - Object Atom = 0x26806 - Ol Atom = 0x13702 - Onabort Atom = 0x19507 - Onafterprint Atom = 0x2360c - Onautocomplete Atom = 0x2760e - Onautocompleteerror Atom = 0x27613 - Onauxclick Atom = 0x61f0a - Onbeforeprint Atom = 0x69e0d - Onbeforeunload Atom = 0x6e70e - Onblur Atom = 0x56d06 - Oncancel Atom = 0x11908 - Oncanplay Atom = 0x14d09 - Oncanplaythrough Atom = 0x14d10 - Onchange Atom = 0x41b08 - Onclick Atom = 0x2f507 - Onclose Atom = 0x36c07 - Oncontextmenu Atom = 0x37e0d - Oncopy Atom = 0x39106 - Oncuechange Atom = 0x3970b - Oncut Atom = 0x3a205 - Ondblclick Atom = 0x3a70a - Ondrag Atom = 0x3b106 - Ondragend Atom = 0x3b109 - Ondragenter Atom = 0x3ba0b - Ondragexit Atom = 0x3c50a - Ondragleave Atom = 0x3df0b - Ondragover Atom = 0x3ea0a - Ondragstart Atom = 0x3f40b - Ondrop Atom = 0x40306 - Ondurationchange Atom = 0x41310 - Onemptied Atom = 0x40a09 - Onended Atom = 0x42307 - Onerror Atom = 0x42a07 - Onfocus Atom = 0x43107 - Onhashchange Atom = 0x43d0c - Oninput Atom = 0x44907 - Oninvalid Atom = 0x45509 - Onkeydown Atom = 0x45e09 - Onkeypress Atom = 0x46b0a - Onkeyup Atom = 0x48007 - Onlanguagechange Atom = 0x48d10 - Onload Atom = 0x49d06 - Onloadeddata Atom = 0x49d0c - Onloadedmetadata Atom = 0x4b010 - Onloadend Atom = 0x4c609 - Onloadstart Atom = 0x4cf0b - Onmessage Atom = 0x4da09 - Onmessageerror Atom = 0x4da0e - Onmousedown Atom = 0x4e80b - Onmouseenter Atom = 0x4f30c - Onmouseleave Atom = 0x4ff0c - Onmousemove Atom = 0x50b0b - Onmouseout Atom = 0x5160a - Onmouseover Atom = 0x5230b - Onmouseup Atom = 0x52e09 - Onmousewheel Atom = 0x53c0c - Onoffline Atom = 0x54809 - Ononline Atom = 0x55108 - 
Onpagehide Atom = 0x5590a - Onpageshow Atom = 0x5730a - Onpaste Atom = 0x57f07 - Onpause Atom = 0x59a07 - Onplay Atom = 0x5a406 - Onplaying Atom = 0x5a409 - Onpopstate Atom = 0x5ad0a - Onprogress Atom = 0x5b70a - Onratechange Atom = 0x5cc0c - Onrejectionhandled Atom = 0x5d812 - Onreset Atom = 0x5ea07 - Onresize Atom = 0x5f108 - Onscroll Atom = 0x60008 - Onsecuritypolicyviolation Atom = 0x60819 - Onseeked Atom = 0x62908 - Onseeking Atom = 0x63109 - Onselect Atom = 0x63a08 - Onshow Atom = 0x64406 - Onsort Atom = 0x64f06 - Onstalled Atom = 0x65909 - Onstorage Atom = 0x66209 - Onsubmit Atom = 0x66b08 - Onsuspend Atom = 0x67b09 - Ontimeupdate Atom = 0x400c - Ontoggle Atom = 0x68408 - Onunhandledrejection Atom = 0x68c14 - Onunload Atom = 0x6ab08 - Onvolumechange Atom = 0x6b30e - Onwaiting Atom = 0x6c109 - Onwheel Atom = 0x6ca07 - Open Atom = 0x1a304 - Optgroup Atom = 0x5f08 - Optimum Atom = 0x6d107 - Option Atom = 0x6e306 - Output Atom = 0x51d06 - P Atom = 0xc01 - Param Atom = 0xc05 - Pattern Atom = 0x6607 - Picture Atom = 0x7b07 - Ping Atom = 0xef04 - Placeholder Atom = 0x1310b - Plaintext Atom = 0x1b209 - Playsinline Atom = 0x1400b - Poster Atom = 0x2cf06 - Pre Atom = 0x47003 - Preload Atom = 0x48607 - Progress Atom = 0x5b908 - Prompt Atom = 0x53606 - Public Atom = 0x58606 - Q Atom = 0xcf01 - Radiogroup Atom = 0x30a - Rb Atom = 0x3a02 - Readonly Atom = 0x35708 - Referrerpolicy Atom = 0x3d10e - Rel Atom = 0x48703 - Required Atom = 0x24c08 - Reversed Atom = 0x8008 - Rows Atom = 0x9c04 - Rowspan Atom = 0x9c07 - Rp Atom = 0x23c02 - Rt Atom = 0x19a02 - Rtc Atom = 0x19a03 - Ruby Atom = 0xfb04 - S Atom = 0x2501 - Samp Atom = 0x7804 - Sandbox Atom = 0x12907 - Scope Atom = 0x67505 - Scoped Atom = 0x67506 - Script Atom = 0x21806 - Seamless Atom = 0x37108 - Section Atom = 0x56807 - Select Atom = 0x63c06 - Selected Atom = 0x63c08 - Shape Atom = 0x1e505 - Size Atom = 0x5f504 - Sizes Atom = 0x5f505 - Slot Atom = 0x1ef04 - Small Atom = 0x20605 - Sortable Atom = 0x65108 - Sorted Atom 
= 0x33706 - Source Atom = 0x37806 - Spacer Atom = 0x43706 - Span Atom = 0x9f04 - Spellcheck Atom = 0x4740a - Src Atom = 0x5c003 - Srcdoc Atom = 0x5c006 - Srclang Atom = 0x5f907 - Srcset Atom = 0x6f906 - Start Atom = 0x3fa05 - Step Atom = 0x58304 - Strike Atom = 0xd206 - Strong Atom = 0x6dd06 - Style Atom = 0x6ff05 - Sub Atom = 0x66d03 - Summary Atom = 0x70407 - Sup Atom = 0x70b03 - Svg Atom = 0x70e03 - System Atom = 0x71106 - Tabindex Atom = 0x4be08 - Table Atom = 0x59505 - Target Atom = 0x2c406 - Tbody Atom = 0x2705 - Td Atom = 0x9202 - Template Atom = 0x71408 - Textarea Atom = 0x35208 - Tfoot Atom = 0xf505 - Th Atom = 0x15602 - Thead Atom = 0x33005 - Time Atom = 0x4204 - Title Atom = 0x11005 - Tr Atom = 0xcc02 - Track Atom = 0x1ba05 - Translate Atom = 0x1f209 - Tt Atom = 0x6802 - Type Atom = 0xd904 - Typemustmatch Atom = 0x2900d - U Atom = 0xb01 - Ul Atom = 0xa702 - Updateviacache Atom = 0x460e - Usemap Atom = 0x59e06 - Value Atom = 0x1505 - Var Atom = 0x16d03 - Video Atom = 0x2f105 - Wbr Atom = 0x57c03 - Width Atom = 0x64905 - Workertype Atom = 0x71c0a - Wrap Atom = 0x72604 - Xmp Atom = 0x12f03 -) - -const hash0 = 0x81cdf10e - -const maxAtomLen = 25 - -var table = [1 << 9]Atom{ - 0x1: 0xe60a, // mediagroup - 0x2: 0x2e404, // lang - 0x4: 0x2c09, // accesskey - 0x5: 0x8b08, // frameset - 0x7: 0x63a08, // onselect - 0x8: 0x71106, // system - 0xa: 0x64905, // width - 0xc: 0x2890b, // formenctype - 0xd: 0x13702, // ol - 0xe: 0x3970b, // oncuechange - 0x10: 0x14b03, // bdo - 0x11: 0x11505, // audio - 0x12: 0x17a09, // draggable - 0x14: 0x2f105, // video - 0x15: 0x2b102, // mn - 0x16: 0x38704, // menu - 0x17: 0x2cf06, // poster - 0x19: 0xf606, // footer - 0x1a: 0x2a806, // method - 0x1b: 0x2b808, // datetime - 0x1c: 0x19507, // onabort - 0x1d: 0x460e, // updateviacache - 0x1e: 0xff05, // async - 0x1f: 0x49d06, // onload - 0x21: 0x11908, // oncancel - 0x22: 0x62908, // onseeked - 0x23: 0x30205, // image - 0x24: 0x5d812, // onrejectionhandled - 0x26: 0x17404, // link - 
0x27: 0x51d06, // output - 0x28: 0x33104, // head - 0x29: 0x4ff0c, // onmouseleave - 0x2a: 0x57f07, // onpaste - 0x2b: 0x5a409, // onplaying - 0x2c: 0x1c407, // colspan - 0x2f: 0x1bf05, // color - 0x30: 0x5f504, // size - 0x31: 0x2e80a, // http-equiv - 0x33: 0x601, // i - 0x34: 0x5590a, // onpagehide - 0x35: 0x68c14, // onunhandledrejection - 0x37: 0x42a07, // onerror - 0x3a: 0x3b08, // basefont - 0x3f: 0x1303, // nav - 0x40: 0x17704, // kind - 0x41: 0x35708, // readonly - 0x42: 0x30806, // mglyph - 0x44: 0xb202, // li - 0x46: 0x2d506, // hidden - 0x47: 0x70e03, // svg - 0x48: 0x58304, // step - 0x49: 0x23f09, // integrity - 0x4a: 0x58606, // public - 0x4c: 0x1ab03, // col - 0x4d: 0x1870a, // blockquote - 0x4e: 0x34f02, // h5 - 0x50: 0x5b908, // progress - 0x51: 0x5f505, // sizes - 0x52: 0x34502, // h4 - 0x56: 0x33005, // thead - 0x57: 0xd607, // keytype - 0x58: 0x5b70a, // onprogress - 0x59: 0x44b09, // inputmode - 0x5a: 0x3b109, // ondragend - 0x5d: 0x3a205, // oncut - 0x5e: 0x43706, // spacer - 0x5f: 0x1ab08, // colgroup - 0x62: 0x16502, // is - 0x65: 0x3c02, // as - 0x66: 0x54809, // onoffline - 0x67: 0x33706, // sorted - 0x69: 0x48d10, // onlanguagechange - 0x6c: 0x43d0c, // onhashchange - 0x6d: 0x9604, // name - 0x6e: 0xf505, // tfoot - 0x6f: 0x56104, // desc - 0x70: 0x33d03, // max - 0x72: 0x1ea06, // coords - 0x73: 0x30d02, // h3 - 0x74: 0x6e70e, // onbeforeunload - 0x75: 0x9c04, // rows - 0x76: 0x63c06, // select - 0x77: 0x9805, // meter - 0x78: 0x38b06, // itemid - 0x79: 0x53c0c, // onmousewheel - 0x7a: 0x5c006, // srcdoc - 0x7d: 0x1ba05, // track - 0x7f: 0x31f08, // itemtype - 0x82: 0xa402, // mo - 0x83: 0x41b08, // onchange - 0x84: 0x33107, // headers - 0x85: 0x5cc0c, // onratechange - 0x86: 0x60819, // onsecuritypolicyviolation - 0x88: 0x4a508, // datalist - 0x89: 0x4e80b, // onmousedown - 0x8a: 0x1ef04, // slot - 0x8b: 0x4b010, // onloadedmetadata - 0x8c: 0x1a06, // accept - 0x8d: 0x26806, // object - 0x91: 0x6b30e, // onvolumechange - 0x92: 0x2107, 
// charset - 0x93: 0x27613, // onautocompleteerror - 0x94: 0xc113, // allowpaymentrequest - 0x95: 0x2804, // body - 0x96: 0x10a07, // default - 0x97: 0x63c08, // selected - 0x98: 0x21e04, // face - 0x99: 0x1e505, // shape - 0x9b: 0x68408, // ontoggle - 0x9e: 0x64b02, // dt - 0x9f: 0xb604, // mark - 0xa1: 0xb01, // u - 0xa4: 0x6ab08, // onunload - 0xa5: 0x5d04, // loop - 0xa6: 0x16408, // disabled - 0xaa: 0x42307, // onended - 0xab: 0xb00a, // malignmark - 0xad: 0x67b09, // onsuspend - 0xae: 0x35105, // mtext - 0xaf: 0x64f06, // onsort - 0xb0: 0x19d08, // itemprop - 0xb3: 0x67109, // itemscope - 0xb4: 0x17305, // blink - 0xb6: 0x3b106, // ondrag - 0xb7: 0xa702, // ul - 0xb8: 0x26e04, // form - 0xb9: 0x12907, // sandbox - 0xba: 0x8b05, // frame - 0xbb: 0x1505, // value - 0xbc: 0x66209, // onstorage - 0xbf: 0xaa07, // acronym - 0xc0: 0x19a02, // rt - 0xc2: 0x202, // br - 0xc3: 0x22608, // fieldset - 0xc4: 0x2900d, // typemustmatch - 0xc5: 0xa208, // nomodule - 0xc6: 0x6c07, // noembed - 0xc7: 0x69e0d, // onbeforeprint - 0xc8: 0x19106, // button - 0xc9: 0x2f507, // onclick - 0xca: 0x70407, // summary - 0xcd: 0xfb04, // ruby - 0xce: 0x56405, // class - 0xcf: 0x3f40b, // ondragstart - 0xd0: 0x23107, // caption - 0xd4: 0xdd0e, // allowusermedia - 0xd5: 0x4cf0b, // onloadstart - 0xd9: 0x16b03, // div - 0xda: 0x4a904, // list - 0xdb: 0x32e04, // math - 0xdc: 0x44b05, // input - 0xdf: 0x3ea0a, // ondragover - 0xe0: 0x2de02, // h2 - 0xe2: 0x1b209, // plaintext - 0xe4: 0x4f30c, // onmouseenter - 0xe7: 0x47907, // checked - 0xe8: 0x47003, // pre - 0xea: 0x35f08, // multiple - 0xeb: 0xba03, // bdi - 0xec: 0x33d09, // maxlength - 0xed: 0xcf01, // q - 0xee: 0x61f0a, // onauxclick - 0xf0: 0x57c03, // wbr - 0xf2: 0x3b04, // base - 0xf3: 0x6e306, // option - 0xf5: 0x41310, // ondurationchange - 0xf7: 0x8908, // noframes - 0xf9: 0x40508, // dropzone - 0xfb: 0x67505, // scope - 0xfc: 0x8008, // reversed - 0xfd: 0x3ba0b, // ondragenter - 0xfe: 0x3fa05, // start - 0xff: 0x12f03, // xmp - 
0x100: 0x5f907, // srclang - 0x101: 0x30703, // img - 0x104: 0x101, // b - 0x105: 0x25403, // for - 0x106: 0x10705, // aside - 0x107: 0x44907, // oninput - 0x108: 0x35604, // area - 0x109: 0x2a40a, // formmethod - 0x10a: 0x72604, // wrap - 0x10c: 0x23c02, // rp - 0x10d: 0x46b0a, // onkeypress - 0x10e: 0x6802, // tt - 0x110: 0x34702, // mi - 0x111: 0x36705, // muted - 0x112: 0xf303, // alt - 0x113: 0x5c504, // code - 0x114: 0x6e02, // em - 0x115: 0x3c50a, // ondragexit - 0x117: 0x9f04, // span - 0x119: 0x6d708, // manifest - 0x11a: 0x38708, // menuitem - 0x11b: 0x58b07, // content - 0x11d: 0x6c109, // onwaiting - 0x11f: 0x4c609, // onloadend - 0x121: 0x37e0d, // oncontextmenu - 0x123: 0x56d06, // onblur - 0x124: 0x3fc07, // article - 0x125: 0x9303, // dir - 0x126: 0xef04, // ping - 0x127: 0x24c08, // required - 0x128: 0x45509, // oninvalid - 0x129: 0xb105, // align - 0x12b: 0x58a04, // icon - 0x12c: 0x64d02, // h6 - 0x12d: 0x1c404, // cols - 0x12e: 0x22e0a, // figcaption - 0x12f: 0x45e09, // onkeydown - 0x130: 0x66b08, // onsubmit - 0x131: 0x14d09, // oncanplay - 0x132: 0x70b03, // sup - 0x133: 0xc01, // p - 0x135: 0x40a09, // onemptied - 0x136: 0x39106, // oncopy - 0x137: 0x19c04, // cite - 0x138: 0x3a70a, // ondblclick - 0x13a: 0x50b0b, // onmousemove - 0x13c: 0x66d03, // sub - 0x13d: 0x48703, // rel - 0x13e: 0x5f08, // optgroup - 0x142: 0x9c07, // rowspan - 0x143: 0x37806, // source - 0x144: 0x21608, // noscript - 0x145: 0x1a304, // open - 0x146: 0x20403, // ins - 0x147: 0x2540d, // foreignObject - 0x148: 0x5ad0a, // onpopstate - 0x14a: 0x28d07, // enctype - 0x14b: 0x2760e, // onautocomplete - 0x14c: 0x35208, // textarea - 0x14e: 0x2780c, // autocomplete - 0x14f: 0x15702, // hr - 0x150: 0x1de08, // controls - 0x151: 0x10902, // id - 0x153: 0x2360c, // onafterprint - 0x155: 0x2610d, // foreignobject - 0x156: 0x32707, // marquee - 0x157: 0x59a07, // onpause - 0x158: 0x5e602, // dl - 0x159: 0x5206, // height - 0x15a: 0x34703, // min - 0x15b: 0x9307, // dirname - 
0x15c: 0x1f209, // translate - 0x15d: 0x5604, // html - 0x15e: 0x34709, // minlength - 0x15f: 0x48607, // preload - 0x160: 0x71408, // template - 0x161: 0x3df0b, // ondragleave - 0x162: 0x3a02, // rb - 0x164: 0x5c003, // src - 0x165: 0x6dd06, // strong - 0x167: 0x7804, // samp - 0x168: 0x6f307, // address - 0x169: 0x55108, // ononline - 0x16b: 0x1310b, // placeholder - 0x16c: 0x2c406, // target - 0x16d: 0x20605, // small - 0x16e: 0x6ca07, // onwheel - 0x16f: 0x1c90a, // annotation - 0x170: 0x4740a, // spellcheck - 0x171: 0x7207, // details - 0x172: 0x10306, // canvas - 0x173: 0x12109, // autofocus - 0x174: 0xc05, // param - 0x176: 0x46308, // download - 0x177: 0x45203, // del - 0x178: 0x36c07, // onclose - 0x179: 0xb903, // kbd - 0x17a: 0x31906, // applet - 0x17b: 0x2e004, // href - 0x17c: 0x5f108, // onresize - 0x17e: 0x49d0c, // onloadeddata - 0x180: 0xcc02, // tr - 0x181: 0x2c00a, // formtarget - 0x182: 0x11005, // title - 0x183: 0x6ff05, // style - 0x184: 0xd206, // strike - 0x185: 0x59e06, // usemap - 0x186: 0x2fc06, // iframe - 0x187: 0x1004, // main - 0x189: 0x7b07, // picture - 0x18c: 0x31605, // ismap - 0x18e: 0x4a504, // data - 0x18f: 0x5905, // label - 0x191: 0x3d10e, // referrerpolicy - 0x192: 0x15602, // th - 0x194: 0x53606, // prompt - 0x195: 0x56807, // section - 0x197: 0x6d107, // optimum - 0x198: 0x2db04, // high - 0x199: 0x15c02, // h1 - 0x19a: 0x65909, // onstalled - 0x19b: 0x16d03, // var - 0x19c: 0x4204, // time - 0x19e: 0x67402, // ms - 0x19f: 0x33106, // header - 0x1a0: 0x4da09, // onmessage - 0x1a1: 0x1a605, // nonce - 0x1a2: 0x26e0a, // formaction - 0x1a3: 0x22006, // center - 0x1a4: 0x3704, // nobr - 0x1a5: 0x59505, // table - 0x1a6: 0x4a907, // listing - 0x1a7: 0x18106, // legend - 0x1a9: 0x29b09, // challenge - 0x1aa: 0x24806, // figure - 0x1ab: 0xe605, // media - 0x1ae: 0xd904, // type - 0x1af: 0x3f04, // font - 0x1b0: 0x4da0e, // onmessageerror - 0x1b1: 0x37108, // seamless - 0x1b2: 0x8703, // dfn - 0x1b3: 0x5c705, // defer - 0x1b4: 
0xc303, // low - 0x1b5: 0x19a03, // rtc - 0x1b6: 0x5230b, // onmouseover - 0x1b7: 0x2b20a, // novalidate - 0x1b8: 0x71c0a, // workertype - 0x1ba: 0x3cd07, // itemref - 0x1bd: 0x1, // a - 0x1be: 0x31803, // map - 0x1bf: 0x400c, // ontimeupdate - 0x1c0: 0x15e07, // bgsound - 0x1c1: 0x3206, // keygen - 0x1c2: 0x2705, // tbody - 0x1c5: 0x64406, // onshow - 0x1c7: 0x2501, // s - 0x1c8: 0x6607, // pattern - 0x1cc: 0x14d10, // oncanplaythrough - 0x1ce: 0x2d702, // dd - 0x1cf: 0x6f906, // srcset - 0x1d0: 0x17003, // big - 0x1d2: 0x65108, // sortable - 0x1d3: 0x48007, // onkeyup - 0x1d5: 0x5a406, // onplay - 0x1d7: 0x4b804, // meta - 0x1d8: 0x40306, // ondrop - 0x1da: 0x60008, // onscroll - 0x1db: 0x1fb0b, // crossorigin - 0x1dc: 0x5730a, // onpageshow - 0x1dd: 0x4, // abbr - 0x1de: 0x9202, // td - 0x1df: 0x58b0f, // contenteditable - 0x1e0: 0x27206, // action - 0x1e1: 0x1400b, // playsinline - 0x1e2: 0x43107, // onfocus - 0x1e3: 0x2e008, // hreflang - 0x1e5: 0x5160a, // onmouseout - 0x1e6: 0x5ea07, // onreset - 0x1e7: 0x13c08, // autoplay - 0x1e8: 0x63109, // onseeking - 0x1ea: 0x67506, // scoped - 0x1ec: 0x30a, // radiogroup - 0x1ee: 0x3800b, // contextmenu - 0x1ef: 0x52e09, // onmouseup - 0x1f1: 0x2ca06, // hgroup - 0x1f2: 0x2080f, // allowfullscreen - 0x1f3: 0x4be08, // tabindex - 0x1f6: 0x30f07, // isindex - 0x1f7: 0x1a0e, // accept-charset - 0x1f8: 0x2ae0e, // formnovalidate - 0x1fb: 0x1c90e, // annotation-xml - 0x1fc: 0x6e05, // embed - 0x1fd: 0x21806, // script - 0x1fe: 0xbb06, // dialog - 0x1ff: 0x1d707, // command -} - -const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + - "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" + - "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" + - "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" + - "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" + - "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + - 
"bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + - "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + - "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + - "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + - "ignObjectforeignobjectformactionautocompleteerrorformenctype" + - "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + - "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + - "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + - "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + - "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + - "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + - "articleondropzonemptiedondurationchangeonendedonerroronfocus" + - "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + - "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + - "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + - "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + - "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + - "classectionbluronpageshowbronpastepublicontenteditableonpaus" + - "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + - "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + - "violationauxclickonseekedonseekingonselectedonshowidth6onsor" + - "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + - "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + - "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + - "arysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go deleted file mode 100644 index ff7acf2d..00000000 --- a/vendor/golang.org/x/net/html/const.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -// Section 12.2.4.2 of the HTML5 specification says "The following elements -// have varying levels of special parsing rules". -// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements -var isSpecialElementMap = map[string]bool{ - "address": true, - "applet": true, - "area": true, - "article": true, - "aside": true, - "base": true, - "basefont": true, - "bgsound": true, - "blockquote": true, - "body": true, - "br": true, - "button": true, - "caption": true, - "center": true, - "col": true, - "colgroup": true, - "dd": true, - "details": true, - "dir": true, - "div": true, - "dl": true, - "dt": true, - "embed": true, - "fieldset": true, - "figcaption": true, - "figure": true, - "footer": true, - "form": true, - "frame": true, - "frameset": true, - "h1": true, - "h2": true, - "h3": true, - "h4": true, - "h5": true, - "h6": true, - "head": true, - "header": true, - "hgroup": true, - "hr": true, - "html": true, - "iframe": true, - "img": true, - "input": true, - "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility. 
- "li": true, - "link": true, - "listing": true, - "main": true, - "marquee": true, - "menu": true, - "meta": true, - "nav": true, - "noembed": true, - "noframes": true, - "noscript": true, - "object": true, - "ol": true, - "p": true, - "param": true, - "plaintext": true, - "pre": true, - "script": true, - "section": true, - "select": true, - "source": true, - "style": true, - "summary": true, - "table": true, - "tbody": true, - "td": true, - "template": true, - "textarea": true, - "tfoot": true, - "th": true, - "thead": true, - "title": true, - "tr": true, - "track": true, - "ul": true, - "wbr": true, - "xmp": true, -} - -func isSpecialElement(element *Node) bool { - switch element.Namespace { - case "", "html": - return isSpecialElementMap[element.Data] - case "math": - switch element.Data { - case "mi", "mo", "mn", "ms", "mtext", "annotation-xml": - return true - } - case "svg": - switch element.Data { - case "foreignObject", "desc", "title": - return true - } - } - return false -} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go deleted file mode 100644 index 2466ae3d..00000000 --- a/vendor/golang.org/x/net/html/doc.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package html implements an HTML5-compliant tokenizer and parser. - -Tokenization is done by creating a Tokenizer for an io.Reader r. It is the -caller's responsibility to ensure that r provides UTF-8 encoded HTML. - - z := html.NewTokenizer(r) - -Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), -which parses the next token and returns its type, or an error: - - for { - tt := z.Next() - if tt == html.ErrorToken { - // ... - return ... - } - // Process the current token. - } - -There are two APIs for retrieving the current token. 
The high-level API is to -call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs -allow optionally calling Raw after Next but before Token, Text, TagName, or -TagAttr. In EBNF notation, the valid call sequence per token is: - - Next {Raw} [ Token | Text | TagName {TagAttr} ] - -Token returns an independent data structure that completely describes a token. -Entities (such as "<") are unescaped, tag names and attribute keys are -lower-cased, and attributes are collected into a []Attribute. For example: - - for { - if z.Next() == html.ErrorToken { - // Returning io.EOF indicates success. - return z.Err() - } - emitToken(z.Token()) - } - -The low-level API performs fewer allocations and copies, but the contents of -the []byte values returned by Text, TagName and TagAttr may change on the next -call to Next. For example, to extract an HTML page's anchor text: - - depth := 0 - for { - tt := z.Next() - switch tt { - case html.ErrorToken: - return z.Err() - case html.TextToken: - if depth > 0 { - // emitBytes should copy the []byte it receives, - // if it doesn't process it immediately. - emitBytes(z.Text()) - } - case html.StartTagToken, html.EndTagToken: - tn, _ := z.TagName() - if len(tn) == 1 && tn[0] == 'a' { - if tt == html.StartTagToken { - depth++ - } else { - depth-- - } - } - } - } - -Parsing is done by calling Parse with an io.Reader, which returns the root of -the parse tree (the document element) as a *Node. It is the caller's -responsibility to ensure that the Reader provides UTF-8 encoded HTML. For -example, to process each anchor node in depth-first order: - - doc, err := html.Parse(r) - if err != nil { - // ... - } - var f func(*html.Node) - f = func(n *html.Node) { - if n.Type == html.ElementNode && n.Data == "a" { - // Do something with n... 
- } - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } - } - f(doc) - -The relevant specifications include: -https://html.spec.whatwg.org/multipage/syntax.html and -https://html.spec.whatwg.org/multipage/syntax.html#tokenization - -# Security Considerations - -Care should be taken when parsing and interpreting HTML, whether full documents -or fragments, within the framework of the HTML specification, especially with -regard to untrusted inputs. - -This package provides both a tokenizer and a parser, which implement the -tokenization, and tokenization and tree construction stages of the WHATWG HTML -parsing specification respectively. While the tokenizer parses and normalizes -individual HTML tokens, only the parser constructs the DOM tree from the -tokenized HTML, as described in the tree construction stage of the -specification, dynamically modifying or extending the docuemnt's DOM tree. - -If your use case requires semantically well-formed HTML documents, as defined by -the WHATWG specification, the parser should be used rather than the tokenizer. - -In security contexts, if trust decisions are being made using the tokenized or -parsed content, the input must be re-serialized (for instance by using Render or -Token.String) in order for those trust decisions to hold, as the process of -tokenization or parsing may alter the content. -*/ -package html // import "golang.org/x/net/html" - -// The tokenization algorithm implemented by this package is not a line-by-line -// transliteration of the relatively verbose state-machine in the WHATWG -// specification. A more direct approach is used instead, where the program -// counter implies the state, such as whether it is tokenizing a tag or a text -// node. Specification compliance is verified by checking expected and actual -// outputs over a test suite rather than aiming for algorithmic fidelity. - -// TODO(nigeltao): Does a DOM API belong in this package or a separate one? 
-// TODO(nigeltao): How does parsing interact with a JavaScript engine? diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go deleted file mode 100644 index c484e5a9..00000000 --- a/vendor/golang.org/x/net/html/doctype.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "strings" -) - -// parseDoctype parses the data from a DoctypeToken into a name, -// public identifier, and system identifier. It returns a Node whose Type -// is DoctypeNode, whose Data is the name, and which has attributes -// named "system" and "public" for the two identifiers if they were present. -// quirks is whether the document should be parsed in "quirks mode". -func parseDoctype(s string) (n *Node, quirks bool) { - n = &Node{Type: DoctypeNode} - - // Find the name. - space := strings.IndexAny(s, whitespace) - if space == -1 { - space = len(s) - } - n.Data = s[:space] - // The comparison to "html" is case-sensitive. - if n.Data != "html" { - quirks = true - } - n.Data = strings.ToLower(n.Data) - s = strings.TrimLeft(s[space:], whitespace) - - if len(s) < 6 { - // It can't start with "PUBLIC" or "SYSTEM". - // Ignore the rest of the string. 
- return n, quirks || s != "" - } - - key := strings.ToLower(s[:6]) - s = s[6:] - for key == "public" || key == "system" { - s = strings.TrimLeft(s, whitespace) - if s == "" { - break - } - quote := s[0] - if quote != '"' && quote != '\'' { - break - } - s = s[1:] - q := strings.IndexRune(s, rune(quote)) - var id string - if q == -1 { - id = s - s = "" - } else { - id = s[:q] - s = s[q+1:] - } - n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) - if key == "public" { - key = "system" - } else { - key = "" - } - } - - if key != "" || s != "" { - quirks = true - } else if len(n.Attr) > 0 { - if n.Attr[0].Key == "public" { - public := strings.ToLower(n.Attr[0].Val) - switch public { - case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": - quirks = true - default: - for _, q := range quirkyIDs { - if strings.HasPrefix(public, q) { - quirks = true - break - } - } - } - // The following two public IDs only cause quirks mode if there is no system ID. - if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || - strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { - quirks = true - } - } - if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && - strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { - quirks = true - } - } - - return n, quirks -} - -// quirkyIDs is a list of public doctype identifiers that cause a document -// to be interpreted in quirks mode. The identifiers should be in lower case. 
-var quirkyIDs = []string{ - "+//silmaril//dtd html pro v0r11 19970101//", - "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", - "-//as//dtd html 3.0 aswedit + extensions//", - "-//ietf//dtd html 2.0 level 1//", - "-//ietf//dtd html 2.0 level 2//", - "-//ietf//dtd html 2.0 strict level 1//", - "-//ietf//dtd html 2.0 strict level 2//", - "-//ietf//dtd html 2.0 strict//", - "-//ietf//dtd html 2.0//", - "-//ietf//dtd html 2.1e//", - "-//ietf//dtd html 3.0//", - "-//ietf//dtd html 3.2 final//", - "-//ietf//dtd html 3.2//", - "-//ietf//dtd html 3//", - "-//ietf//dtd html level 0//", - "-//ietf//dtd html level 1//", - "-//ietf//dtd html level 2//", - "-//ietf//dtd html level 3//", - "-//ietf//dtd html strict level 0//", - "-//ietf//dtd html strict level 1//", - "-//ietf//dtd html strict level 2//", - "-//ietf//dtd html strict level 3//", - "-//ietf//dtd html strict//", - "-//ietf//dtd html//", - "-//metrius//dtd metrius presentational//", - "-//microsoft//dtd internet explorer 2.0 html strict//", - "-//microsoft//dtd internet explorer 2.0 html//", - "-//microsoft//dtd internet explorer 2.0 tables//", - "-//microsoft//dtd internet explorer 3.0 html strict//", - "-//microsoft//dtd internet explorer 3.0 html//", - "-//microsoft//dtd internet explorer 3.0 tables//", - "-//netscape comm. corp.//dtd html//", - "-//netscape comm. 
corp.//dtd strict html//", - "-//o'reilly and associates//dtd html 2.0//", - "-//o'reilly and associates//dtd html extended 1.0//", - "-//o'reilly and associates//dtd html extended relaxed 1.0//", - "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", - "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", - "-//spyglass//dtd html 2.0 extended//", - "-//sq//dtd html 2.0 hotmetal + extensions//", - "-//sun microsystems corp.//dtd hotjava html//", - "-//sun microsystems corp.//dtd hotjava strict html//", - "-//w3c//dtd html 3 1995-03-24//", - "-//w3c//dtd html 3.2 draft//", - "-//w3c//dtd html 3.2 final//", - "-//w3c//dtd html 3.2//", - "-//w3c//dtd html 3.2s draft//", - "-//w3c//dtd html 4.0 frameset//", - "-//w3c//dtd html 4.0 transitional//", - "-//w3c//dtd html experimental 19960712//", - "-//w3c//dtd html experimental 970421//", - "-//w3c//dtd w3 html//", - "-//w3o//dtd w3 html 3.0//", - "-//webtechs//dtd mozilla html 2.0//", - "-//webtechs//dtd mozilla html//", -} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go deleted file mode 100644 index b628880a..00000000 --- a/vendor/golang.org/x/net/html/entity.go +++ /dev/null @@ -1,2253 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -// All entities that do not end with ';' are 6 or fewer bytes long. -const longestEntityWithoutSemicolon = 6 - -// entity is a map from HTML entity names to their values. The semicolon matters: -// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references -// lists both "amp" and "amp;" as two separate entries. 
-// -// Note that the HTML5 list is larger than the HTML4 list at -// http://www.w3.org/TR/html4/sgml/entities.html -var entity = map[string]rune{ - "AElig;": '\U000000C6', - "AMP;": '\U00000026', - "Aacute;": '\U000000C1', - "Abreve;": '\U00000102', - "Acirc;": '\U000000C2', - "Acy;": '\U00000410', - "Afr;": '\U0001D504', - "Agrave;": '\U000000C0', - "Alpha;": '\U00000391', - "Amacr;": '\U00000100', - "And;": '\U00002A53', - "Aogon;": '\U00000104', - "Aopf;": '\U0001D538', - "ApplyFunction;": '\U00002061', - "Aring;": '\U000000C5', - "Ascr;": '\U0001D49C', - "Assign;": '\U00002254', - "Atilde;": '\U000000C3', - "Auml;": '\U000000C4', - "Backslash;": '\U00002216', - "Barv;": '\U00002AE7', - "Barwed;": '\U00002306', - "Bcy;": '\U00000411', - "Because;": '\U00002235', - "Bernoullis;": '\U0000212C', - "Beta;": '\U00000392', - "Bfr;": '\U0001D505', - "Bopf;": '\U0001D539', - "Breve;": '\U000002D8', - "Bscr;": '\U0000212C', - "Bumpeq;": '\U0000224E', - "CHcy;": '\U00000427', - "COPY;": '\U000000A9', - "Cacute;": '\U00000106', - "Cap;": '\U000022D2', - "CapitalDifferentialD;": '\U00002145', - "Cayleys;": '\U0000212D', - "Ccaron;": '\U0000010C', - "Ccedil;": '\U000000C7', - "Ccirc;": '\U00000108', - "Cconint;": '\U00002230', - "Cdot;": '\U0000010A', - "Cedilla;": '\U000000B8', - "CenterDot;": '\U000000B7', - "Cfr;": '\U0000212D', - "Chi;": '\U000003A7', - "CircleDot;": '\U00002299', - "CircleMinus;": '\U00002296', - "CirclePlus;": '\U00002295', - "CircleTimes;": '\U00002297', - "ClockwiseContourIntegral;": '\U00002232', - "CloseCurlyDoubleQuote;": '\U0000201D', - "CloseCurlyQuote;": '\U00002019', - "Colon;": '\U00002237', - "Colone;": '\U00002A74', - "Congruent;": '\U00002261', - "Conint;": '\U0000222F', - "ContourIntegral;": '\U0000222E', - "Copf;": '\U00002102', - "Coproduct;": '\U00002210', - "CounterClockwiseContourIntegral;": '\U00002233', - "Cross;": '\U00002A2F', - "Cscr;": '\U0001D49E', - "Cup;": '\U000022D3', - "CupCap;": '\U0000224D', - "DD;": '\U00002145', - 
"DDotrahd;": '\U00002911', - "DJcy;": '\U00000402', - "DScy;": '\U00000405', - "DZcy;": '\U0000040F', - "Dagger;": '\U00002021', - "Darr;": '\U000021A1', - "Dashv;": '\U00002AE4', - "Dcaron;": '\U0000010E', - "Dcy;": '\U00000414', - "Del;": '\U00002207', - "Delta;": '\U00000394', - "Dfr;": '\U0001D507', - "DiacriticalAcute;": '\U000000B4', - "DiacriticalDot;": '\U000002D9', - "DiacriticalDoubleAcute;": '\U000002DD', - "DiacriticalGrave;": '\U00000060', - "DiacriticalTilde;": '\U000002DC', - "Diamond;": '\U000022C4', - "DifferentialD;": '\U00002146', - "Dopf;": '\U0001D53B', - "Dot;": '\U000000A8', - "DotDot;": '\U000020DC', - "DotEqual;": '\U00002250', - "DoubleContourIntegral;": '\U0000222F', - "DoubleDot;": '\U000000A8', - "DoubleDownArrow;": '\U000021D3', - "DoubleLeftArrow;": '\U000021D0', - "DoubleLeftRightArrow;": '\U000021D4', - "DoubleLeftTee;": '\U00002AE4', - "DoubleLongLeftArrow;": '\U000027F8', - "DoubleLongLeftRightArrow;": '\U000027FA', - "DoubleLongRightArrow;": '\U000027F9', - "DoubleRightArrow;": '\U000021D2', - "DoubleRightTee;": '\U000022A8', - "DoubleUpArrow;": '\U000021D1', - "DoubleUpDownArrow;": '\U000021D5', - "DoubleVerticalBar;": '\U00002225', - "DownArrow;": '\U00002193', - "DownArrowBar;": '\U00002913', - "DownArrowUpArrow;": '\U000021F5', - "DownBreve;": '\U00000311', - "DownLeftRightVector;": '\U00002950', - "DownLeftTeeVector;": '\U0000295E', - "DownLeftVector;": '\U000021BD', - "DownLeftVectorBar;": '\U00002956', - "DownRightTeeVector;": '\U0000295F', - "DownRightVector;": '\U000021C1', - "DownRightVectorBar;": '\U00002957', - "DownTee;": '\U000022A4', - "DownTeeArrow;": '\U000021A7', - "Downarrow;": '\U000021D3', - "Dscr;": '\U0001D49F', - "Dstrok;": '\U00000110', - "ENG;": '\U0000014A', - "ETH;": '\U000000D0', - "Eacute;": '\U000000C9', - "Ecaron;": '\U0000011A', - "Ecirc;": '\U000000CA', - "Ecy;": '\U0000042D', - "Edot;": '\U00000116', - "Efr;": '\U0001D508', - "Egrave;": '\U000000C8', - "Element;": '\U00002208', - "Emacr;": 
'\U00000112', - "EmptySmallSquare;": '\U000025FB', - "EmptyVerySmallSquare;": '\U000025AB', - "Eogon;": '\U00000118', - "Eopf;": '\U0001D53C', - "Epsilon;": '\U00000395', - "Equal;": '\U00002A75', - "EqualTilde;": '\U00002242', - "Equilibrium;": '\U000021CC', - "Escr;": '\U00002130', - "Esim;": '\U00002A73', - "Eta;": '\U00000397', - "Euml;": '\U000000CB', - "Exists;": '\U00002203', - "ExponentialE;": '\U00002147', - "Fcy;": '\U00000424', - "Ffr;": '\U0001D509', - "FilledSmallSquare;": '\U000025FC', - "FilledVerySmallSquare;": '\U000025AA', - "Fopf;": '\U0001D53D', - "ForAll;": '\U00002200', - "Fouriertrf;": '\U00002131', - "Fscr;": '\U00002131', - "GJcy;": '\U00000403', - "GT;": '\U0000003E', - "Gamma;": '\U00000393', - "Gammad;": '\U000003DC', - "Gbreve;": '\U0000011E', - "Gcedil;": '\U00000122', - "Gcirc;": '\U0000011C', - "Gcy;": '\U00000413', - "Gdot;": '\U00000120', - "Gfr;": '\U0001D50A', - "Gg;": '\U000022D9', - "Gopf;": '\U0001D53E', - "GreaterEqual;": '\U00002265', - "GreaterEqualLess;": '\U000022DB', - "GreaterFullEqual;": '\U00002267', - "GreaterGreater;": '\U00002AA2', - "GreaterLess;": '\U00002277', - "GreaterSlantEqual;": '\U00002A7E', - "GreaterTilde;": '\U00002273', - "Gscr;": '\U0001D4A2', - "Gt;": '\U0000226B', - "HARDcy;": '\U0000042A', - "Hacek;": '\U000002C7', - "Hat;": '\U0000005E', - "Hcirc;": '\U00000124', - "Hfr;": '\U0000210C', - "HilbertSpace;": '\U0000210B', - "Hopf;": '\U0000210D', - "HorizontalLine;": '\U00002500', - "Hscr;": '\U0000210B', - "Hstrok;": '\U00000126', - "HumpDownHump;": '\U0000224E', - "HumpEqual;": '\U0000224F', - "IEcy;": '\U00000415', - "IJlig;": '\U00000132', - "IOcy;": '\U00000401', - "Iacute;": '\U000000CD', - "Icirc;": '\U000000CE', - "Icy;": '\U00000418', - "Idot;": '\U00000130', - "Ifr;": '\U00002111', - "Igrave;": '\U000000CC', - "Im;": '\U00002111', - "Imacr;": '\U0000012A', - "ImaginaryI;": '\U00002148', - "Implies;": '\U000021D2', - "Int;": '\U0000222C', - "Integral;": '\U0000222B', - "Intersection;": 
'\U000022C2', - "InvisibleComma;": '\U00002063', - "InvisibleTimes;": '\U00002062', - "Iogon;": '\U0000012E', - "Iopf;": '\U0001D540', - "Iota;": '\U00000399', - "Iscr;": '\U00002110', - "Itilde;": '\U00000128', - "Iukcy;": '\U00000406', - "Iuml;": '\U000000CF', - "Jcirc;": '\U00000134', - "Jcy;": '\U00000419', - "Jfr;": '\U0001D50D', - "Jopf;": '\U0001D541', - "Jscr;": '\U0001D4A5', - "Jsercy;": '\U00000408', - "Jukcy;": '\U00000404', - "KHcy;": '\U00000425', - "KJcy;": '\U0000040C', - "Kappa;": '\U0000039A', - "Kcedil;": '\U00000136', - "Kcy;": '\U0000041A', - "Kfr;": '\U0001D50E', - "Kopf;": '\U0001D542', - "Kscr;": '\U0001D4A6', - "LJcy;": '\U00000409', - "LT;": '\U0000003C', - "Lacute;": '\U00000139', - "Lambda;": '\U0000039B', - "Lang;": '\U000027EA', - "Laplacetrf;": '\U00002112', - "Larr;": '\U0000219E', - "Lcaron;": '\U0000013D', - "Lcedil;": '\U0000013B', - "Lcy;": '\U0000041B', - "LeftAngleBracket;": '\U000027E8', - "LeftArrow;": '\U00002190', - "LeftArrowBar;": '\U000021E4', - "LeftArrowRightArrow;": '\U000021C6', - "LeftCeiling;": '\U00002308', - "LeftDoubleBracket;": '\U000027E6', - "LeftDownTeeVector;": '\U00002961', - "LeftDownVector;": '\U000021C3', - "LeftDownVectorBar;": '\U00002959', - "LeftFloor;": '\U0000230A', - "LeftRightArrow;": '\U00002194', - "LeftRightVector;": '\U0000294E', - "LeftTee;": '\U000022A3', - "LeftTeeArrow;": '\U000021A4', - "LeftTeeVector;": '\U0000295A', - "LeftTriangle;": '\U000022B2', - "LeftTriangleBar;": '\U000029CF', - "LeftTriangleEqual;": '\U000022B4', - "LeftUpDownVector;": '\U00002951', - "LeftUpTeeVector;": '\U00002960', - "LeftUpVector;": '\U000021BF', - "LeftUpVectorBar;": '\U00002958', - "LeftVector;": '\U000021BC', - "LeftVectorBar;": '\U00002952', - "Leftarrow;": '\U000021D0', - "Leftrightarrow;": '\U000021D4', - "LessEqualGreater;": '\U000022DA', - "LessFullEqual;": '\U00002266', - "LessGreater;": '\U00002276', - "LessLess;": '\U00002AA1', - "LessSlantEqual;": '\U00002A7D', - "LessTilde;": '\U00002272', - 
"Lfr;": '\U0001D50F', - "Ll;": '\U000022D8', - "Lleftarrow;": '\U000021DA', - "Lmidot;": '\U0000013F', - "LongLeftArrow;": '\U000027F5', - "LongLeftRightArrow;": '\U000027F7', - "LongRightArrow;": '\U000027F6', - "Longleftarrow;": '\U000027F8', - "Longleftrightarrow;": '\U000027FA', - "Longrightarrow;": '\U000027F9', - "Lopf;": '\U0001D543', - "LowerLeftArrow;": '\U00002199', - "LowerRightArrow;": '\U00002198', - "Lscr;": '\U00002112', - "Lsh;": '\U000021B0', - "Lstrok;": '\U00000141', - "Lt;": '\U0000226A', - "Map;": '\U00002905', - "Mcy;": '\U0000041C', - "MediumSpace;": '\U0000205F', - "Mellintrf;": '\U00002133', - "Mfr;": '\U0001D510', - "MinusPlus;": '\U00002213', - "Mopf;": '\U0001D544', - "Mscr;": '\U00002133', - "Mu;": '\U0000039C', - "NJcy;": '\U0000040A', - "Nacute;": '\U00000143', - "Ncaron;": '\U00000147', - "Ncedil;": '\U00000145', - "Ncy;": '\U0000041D', - "NegativeMediumSpace;": '\U0000200B', - "NegativeThickSpace;": '\U0000200B', - "NegativeThinSpace;": '\U0000200B', - "NegativeVeryThinSpace;": '\U0000200B', - "NestedGreaterGreater;": '\U0000226B', - "NestedLessLess;": '\U0000226A', - "NewLine;": '\U0000000A', - "Nfr;": '\U0001D511', - "NoBreak;": '\U00002060', - "NonBreakingSpace;": '\U000000A0', - "Nopf;": '\U00002115', - "Not;": '\U00002AEC', - "NotCongruent;": '\U00002262', - "NotCupCap;": '\U0000226D', - "NotDoubleVerticalBar;": '\U00002226', - "NotElement;": '\U00002209', - "NotEqual;": '\U00002260', - "NotExists;": '\U00002204', - "NotGreater;": '\U0000226F', - "NotGreaterEqual;": '\U00002271', - "NotGreaterLess;": '\U00002279', - "NotGreaterTilde;": '\U00002275', - "NotLeftTriangle;": '\U000022EA', - "NotLeftTriangleEqual;": '\U000022EC', - "NotLess;": '\U0000226E', - "NotLessEqual;": '\U00002270', - "NotLessGreater;": '\U00002278', - "NotLessTilde;": '\U00002274', - "NotPrecedes;": '\U00002280', - "NotPrecedesSlantEqual;": '\U000022E0', - "NotReverseElement;": '\U0000220C', - "NotRightTriangle;": '\U000022EB', - "NotRightTriangleEqual;": 
'\U000022ED', - "NotSquareSubsetEqual;": '\U000022E2', - "NotSquareSupersetEqual;": '\U000022E3', - "NotSubsetEqual;": '\U00002288', - "NotSucceeds;": '\U00002281', - "NotSucceedsSlantEqual;": '\U000022E1', - "NotSupersetEqual;": '\U00002289', - "NotTilde;": '\U00002241', - "NotTildeEqual;": '\U00002244', - "NotTildeFullEqual;": '\U00002247', - "NotTildeTilde;": '\U00002249', - "NotVerticalBar;": '\U00002224', - "Nscr;": '\U0001D4A9', - "Ntilde;": '\U000000D1', - "Nu;": '\U0000039D', - "OElig;": '\U00000152', - "Oacute;": '\U000000D3', - "Ocirc;": '\U000000D4', - "Ocy;": '\U0000041E', - "Odblac;": '\U00000150', - "Ofr;": '\U0001D512', - "Ograve;": '\U000000D2', - "Omacr;": '\U0000014C', - "Omega;": '\U000003A9', - "Omicron;": '\U0000039F', - "Oopf;": '\U0001D546', - "OpenCurlyDoubleQuote;": '\U0000201C', - "OpenCurlyQuote;": '\U00002018', - "Or;": '\U00002A54', - "Oscr;": '\U0001D4AA', - "Oslash;": '\U000000D8', - "Otilde;": '\U000000D5', - "Otimes;": '\U00002A37', - "Ouml;": '\U000000D6', - "OverBar;": '\U0000203E', - "OverBrace;": '\U000023DE', - "OverBracket;": '\U000023B4', - "OverParenthesis;": '\U000023DC', - "PartialD;": '\U00002202', - "Pcy;": '\U0000041F', - "Pfr;": '\U0001D513', - "Phi;": '\U000003A6', - "Pi;": '\U000003A0', - "PlusMinus;": '\U000000B1', - "Poincareplane;": '\U0000210C', - "Popf;": '\U00002119', - "Pr;": '\U00002ABB', - "Precedes;": '\U0000227A', - "PrecedesEqual;": '\U00002AAF', - "PrecedesSlantEqual;": '\U0000227C', - "PrecedesTilde;": '\U0000227E', - "Prime;": '\U00002033', - "Product;": '\U0000220F', - "Proportion;": '\U00002237', - "Proportional;": '\U0000221D', - "Pscr;": '\U0001D4AB', - "Psi;": '\U000003A8', - "QUOT;": '\U00000022', - "Qfr;": '\U0001D514', - "Qopf;": '\U0000211A', - "Qscr;": '\U0001D4AC', - "RBarr;": '\U00002910', - "REG;": '\U000000AE', - "Racute;": '\U00000154', - "Rang;": '\U000027EB', - "Rarr;": '\U000021A0', - "Rarrtl;": '\U00002916', - "Rcaron;": '\U00000158', - "Rcedil;": '\U00000156', - "Rcy;": 
'\U00000420', - "Re;": '\U0000211C', - "ReverseElement;": '\U0000220B', - "ReverseEquilibrium;": '\U000021CB', - "ReverseUpEquilibrium;": '\U0000296F', - "Rfr;": '\U0000211C', - "Rho;": '\U000003A1', - "RightAngleBracket;": '\U000027E9', - "RightArrow;": '\U00002192', - "RightArrowBar;": '\U000021E5', - "RightArrowLeftArrow;": '\U000021C4', - "RightCeiling;": '\U00002309', - "RightDoubleBracket;": '\U000027E7', - "RightDownTeeVector;": '\U0000295D', - "RightDownVector;": '\U000021C2', - "RightDownVectorBar;": '\U00002955', - "RightFloor;": '\U0000230B', - "RightTee;": '\U000022A2', - "RightTeeArrow;": '\U000021A6', - "RightTeeVector;": '\U0000295B', - "RightTriangle;": '\U000022B3', - "RightTriangleBar;": '\U000029D0', - "RightTriangleEqual;": '\U000022B5', - "RightUpDownVector;": '\U0000294F', - "RightUpTeeVector;": '\U0000295C', - "RightUpVector;": '\U000021BE', - "RightUpVectorBar;": '\U00002954', - "RightVector;": '\U000021C0', - "RightVectorBar;": '\U00002953', - "Rightarrow;": '\U000021D2', - "Ropf;": '\U0000211D', - "RoundImplies;": '\U00002970', - "Rrightarrow;": '\U000021DB', - "Rscr;": '\U0000211B', - "Rsh;": '\U000021B1', - "RuleDelayed;": '\U000029F4', - "SHCHcy;": '\U00000429', - "SHcy;": '\U00000428', - "SOFTcy;": '\U0000042C', - "Sacute;": '\U0000015A', - "Sc;": '\U00002ABC', - "Scaron;": '\U00000160', - "Scedil;": '\U0000015E', - "Scirc;": '\U0000015C', - "Scy;": '\U00000421', - "Sfr;": '\U0001D516', - "ShortDownArrow;": '\U00002193', - "ShortLeftArrow;": '\U00002190', - "ShortRightArrow;": '\U00002192', - "ShortUpArrow;": '\U00002191', - "Sigma;": '\U000003A3', - "SmallCircle;": '\U00002218', - "Sopf;": '\U0001D54A', - "Sqrt;": '\U0000221A', - "Square;": '\U000025A1', - "SquareIntersection;": '\U00002293', - "SquareSubset;": '\U0000228F', - "SquareSubsetEqual;": '\U00002291', - "SquareSuperset;": '\U00002290', - "SquareSupersetEqual;": '\U00002292', - "SquareUnion;": '\U00002294', - "Sscr;": '\U0001D4AE', - "Star;": '\U000022C6', - "Sub;": 
'\U000022D0', - "Subset;": '\U000022D0', - "SubsetEqual;": '\U00002286', - "Succeeds;": '\U0000227B', - "SucceedsEqual;": '\U00002AB0', - "SucceedsSlantEqual;": '\U0000227D', - "SucceedsTilde;": '\U0000227F', - "SuchThat;": '\U0000220B', - "Sum;": '\U00002211', - "Sup;": '\U000022D1', - "Superset;": '\U00002283', - "SupersetEqual;": '\U00002287', - "Supset;": '\U000022D1', - "THORN;": '\U000000DE', - "TRADE;": '\U00002122', - "TSHcy;": '\U0000040B', - "TScy;": '\U00000426', - "Tab;": '\U00000009', - "Tau;": '\U000003A4', - "Tcaron;": '\U00000164', - "Tcedil;": '\U00000162', - "Tcy;": '\U00000422', - "Tfr;": '\U0001D517', - "Therefore;": '\U00002234', - "Theta;": '\U00000398', - "ThinSpace;": '\U00002009', - "Tilde;": '\U0000223C', - "TildeEqual;": '\U00002243', - "TildeFullEqual;": '\U00002245', - "TildeTilde;": '\U00002248', - "Topf;": '\U0001D54B', - "TripleDot;": '\U000020DB', - "Tscr;": '\U0001D4AF', - "Tstrok;": '\U00000166', - "Uacute;": '\U000000DA', - "Uarr;": '\U0000219F', - "Uarrocir;": '\U00002949', - "Ubrcy;": '\U0000040E', - "Ubreve;": '\U0000016C', - "Ucirc;": '\U000000DB', - "Ucy;": '\U00000423', - "Udblac;": '\U00000170', - "Ufr;": '\U0001D518', - "Ugrave;": '\U000000D9', - "Umacr;": '\U0000016A', - "UnderBar;": '\U0000005F', - "UnderBrace;": '\U000023DF', - "UnderBracket;": '\U000023B5', - "UnderParenthesis;": '\U000023DD', - "Union;": '\U000022C3', - "UnionPlus;": '\U0000228E', - "Uogon;": '\U00000172', - "Uopf;": '\U0001D54C', - "UpArrow;": '\U00002191', - "UpArrowBar;": '\U00002912', - "UpArrowDownArrow;": '\U000021C5', - "UpDownArrow;": '\U00002195', - "UpEquilibrium;": '\U0000296E', - "UpTee;": '\U000022A5', - "UpTeeArrow;": '\U000021A5', - "Uparrow;": '\U000021D1', - "Updownarrow;": '\U000021D5', - "UpperLeftArrow;": '\U00002196', - "UpperRightArrow;": '\U00002197', - "Upsi;": '\U000003D2', - "Upsilon;": '\U000003A5', - "Uring;": '\U0000016E', - "Uscr;": '\U0001D4B0', - "Utilde;": '\U00000168', - "Uuml;": '\U000000DC', - "VDash;": 
'\U000022AB', - "Vbar;": '\U00002AEB', - "Vcy;": '\U00000412', - "Vdash;": '\U000022A9', - "Vdashl;": '\U00002AE6', - "Vee;": '\U000022C1', - "Verbar;": '\U00002016', - "Vert;": '\U00002016', - "VerticalBar;": '\U00002223', - "VerticalLine;": '\U0000007C', - "VerticalSeparator;": '\U00002758', - "VerticalTilde;": '\U00002240', - "VeryThinSpace;": '\U0000200A', - "Vfr;": '\U0001D519', - "Vopf;": '\U0001D54D', - "Vscr;": '\U0001D4B1', - "Vvdash;": '\U000022AA', - "Wcirc;": '\U00000174', - "Wedge;": '\U000022C0', - "Wfr;": '\U0001D51A', - "Wopf;": '\U0001D54E', - "Wscr;": '\U0001D4B2', - "Xfr;": '\U0001D51B', - "Xi;": '\U0000039E', - "Xopf;": '\U0001D54F', - "Xscr;": '\U0001D4B3', - "YAcy;": '\U0000042F', - "YIcy;": '\U00000407', - "YUcy;": '\U0000042E', - "Yacute;": '\U000000DD', - "Ycirc;": '\U00000176', - "Ycy;": '\U0000042B', - "Yfr;": '\U0001D51C', - "Yopf;": '\U0001D550', - "Yscr;": '\U0001D4B4', - "Yuml;": '\U00000178', - "ZHcy;": '\U00000416', - "Zacute;": '\U00000179', - "Zcaron;": '\U0000017D', - "Zcy;": '\U00000417', - "Zdot;": '\U0000017B', - "ZeroWidthSpace;": '\U0000200B', - "Zeta;": '\U00000396', - "Zfr;": '\U00002128', - "Zopf;": '\U00002124', - "Zscr;": '\U0001D4B5', - "aacute;": '\U000000E1', - "abreve;": '\U00000103', - "ac;": '\U0000223E', - "acd;": '\U0000223F', - "acirc;": '\U000000E2', - "acute;": '\U000000B4', - "acy;": '\U00000430', - "aelig;": '\U000000E6', - "af;": '\U00002061', - "afr;": '\U0001D51E', - "agrave;": '\U000000E0', - "alefsym;": '\U00002135', - "aleph;": '\U00002135', - "alpha;": '\U000003B1', - "amacr;": '\U00000101', - "amalg;": '\U00002A3F', - "amp;": '\U00000026', - "and;": '\U00002227', - "andand;": '\U00002A55', - "andd;": '\U00002A5C', - "andslope;": '\U00002A58', - "andv;": '\U00002A5A', - "ang;": '\U00002220', - "ange;": '\U000029A4', - "angle;": '\U00002220', - "angmsd;": '\U00002221', - "angmsdaa;": '\U000029A8', - "angmsdab;": '\U000029A9', - "angmsdac;": '\U000029AA', - "angmsdad;": '\U000029AB', - "angmsdae;": 
'\U000029AC', - "angmsdaf;": '\U000029AD', - "angmsdag;": '\U000029AE', - "angmsdah;": '\U000029AF', - "angrt;": '\U0000221F', - "angrtvb;": '\U000022BE', - "angrtvbd;": '\U0000299D', - "angsph;": '\U00002222', - "angst;": '\U000000C5', - "angzarr;": '\U0000237C', - "aogon;": '\U00000105', - "aopf;": '\U0001D552', - "ap;": '\U00002248', - "apE;": '\U00002A70', - "apacir;": '\U00002A6F', - "ape;": '\U0000224A', - "apid;": '\U0000224B', - "apos;": '\U00000027', - "approx;": '\U00002248', - "approxeq;": '\U0000224A', - "aring;": '\U000000E5', - "ascr;": '\U0001D4B6', - "ast;": '\U0000002A', - "asymp;": '\U00002248', - "asympeq;": '\U0000224D', - "atilde;": '\U000000E3', - "auml;": '\U000000E4', - "awconint;": '\U00002233', - "awint;": '\U00002A11', - "bNot;": '\U00002AED', - "backcong;": '\U0000224C', - "backepsilon;": '\U000003F6', - "backprime;": '\U00002035', - "backsim;": '\U0000223D', - "backsimeq;": '\U000022CD', - "barvee;": '\U000022BD', - "barwed;": '\U00002305', - "barwedge;": '\U00002305', - "bbrk;": '\U000023B5', - "bbrktbrk;": '\U000023B6', - "bcong;": '\U0000224C', - "bcy;": '\U00000431', - "bdquo;": '\U0000201E', - "becaus;": '\U00002235', - "because;": '\U00002235', - "bemptyv;": '\U000029B0', - "bepsi;": '\U000003F6', - "bernou;": '\U0000212C', - "beta;": '\U000003B2', - "beth;": '\U00002136', - "between;": '\U0000226C', - "bfr;": '\U0001D51F', - "bigcap;": '\U000022C2', - "bigcirc;": '\U000025EF', - "bigcup;": '\U000022C3', - "bigodot;": '\U00002A00', - "bigoplus;": '\U00002A01', - "bigotimes;": '\U00002A02', - "bigsqcup;": '\U00002A06', - "bigstar;": '\U00002605', - "bigtriangledown;": '\U000025BD', - "bigtriangleup;": '\U000025B3', - "biguplus;": '\U00002A04', - "bigvee;": '\U000022C1', - "bigwedge;": '\U000022C0', - "bkarow;": '\U0000290D', - "blacklozenge;": '\U000029EB', - "blacksquare;": '\U000025AA', - "blacktriangle;": '\U000025B4', - "blacktriangledown;": '\U000025BE', - "blacktriangleleft;": '\U000025C2', - "blacktriangleright;": 
'\U000025B8', - "blank;": '\U00002423', - "blk12;": '\U00002592', - "blk14;": '\U00002591', - "blk34;": '\U00002593', - "block;": '\U00002588', - "bnot;": '\U00002310', - "bopf;": '\U0001D553', - "bot;": '\U000022A5', - "bottom;": '\U000022A5', - "bowtie;": '\U000022C8', - "boxDL;": '\U00002557', - "boxDR;": '\U00002554', - "boxDl;": '\U00002556', - "boxDr;": '\U00002553', - "boxH;": '\U00002550', - "boxHD;": '\U00002566', - "boxHU;": '\U00002569', - "boxHd;": '\U00002564', - "boxHu;": '\U00002567', - "boxUL;": '\U0000255D', - "boxUR;": '\U0000255A', - "boxUl;": '\U0000255C', - "boxUr;": '\U00002559', - "boxV;": '\U00002551', - "boxVH;": '\U0000256C', - "boxVL;": '\U00002563', - "boxVR;": '\U00002560', - "boxVh;": '\U0000256B', - "boxVl;": '\U00002562', - "boxVr;": '\U0000255F', - "boxbox;": '\U000029C9', - "boxdL;": '\U00002555', - "boxdR;": '\U00002552', - "boxdl;": '\U00002510', - "boxdr;": '\U0000250C', - "boxh;": '\U00002500', - "boxhD;": '\U00002565', - "boxhU;": '\U00002568', - "boxhd;": '\U0000252C', - "boxhu;": '\U00002534', - "boxminus;": '\U0000229F', - "boxplus;": '\U0000229E', - "boxtimes;": '\U000022A0', - "boxuL;": '\U0000255B', - "boxuR;": '\U00002558', - "boxul;": '\U00002518', - "boxur;": '\U00002514', - "boxv;": '\U00002502', - "boxvH;": '\U0000256A', - "boxvL;": '\U00002561', - "boxvR;": '\U0000255E', - "boxvh;": '\U0000253C', - "boxvl;": '\U00002524', - "boxvr;": '\U0000251C', - "bprime;": '\U00002035', - "breve;": '\U000002D8', - "brvbar;": '\U000000A6', - "bscr;": '\U0001D4B7', - "bsemi;": '\U0000204F', - "bsim;": '\U0000223D', - "bsime;": '\U000022CD', - "bsol;": '\U0000005C', - "bsolb;": '\U000029C5', - "bsolhsub;": '\U000027C8', - "bull;": '\U00002022', - "bullet;": '\U00002022', - "bump;": '\U0000224E', - "bumpE;": '\U00002AAE', - "bumpe;": '\U0000224F', - "bumpeq;": '\U0000224F', - "cacute;": '\U00000107', - "cap;": '\U00002229', - "capand;": '\U00002A44', - "capbrcup;": '\U00002A49', - "capcap;": '\U00002A4B', - "capcup;": '\U00002A47', 
- "capdot;": '\U00002A40', - "caret;": '\U00002041', - "caron;": '\U000002C7', - "ccaps;": '\U00002A4D', - "ccaron;": '\U0000010D', - "ccedil;": '\U000000E7', - "ccirc;": '\U00000109', - "ccups;": '\U00002A4C', - "ccupssm;": '\U00002A50', - "cdot;": '\U0000010B', - "cedil;": '\U000000B8', - "cemptyv;": '\U000029B2', - "cent;": '\U000000A2', - "centerdot;": '\U000000B7', - "cfr;": '\U0001D520', - "chcy;": '\U00000447', - "check;": '\U00002713', - "checkmark;": '\U00002713', - "chi;": '\U000003C7', - "cir;": '\U000025CB', - "cirE;": '\U000029C3', - "circ;": '\U000002C6', - "circeq;": '\U00002257', - "circlearrowleft;": '\U000021BA', - "circlearrowright;": '\U000021BB', - "circledR;": '\U000000AE', - "circledS;": '\U000024C8', - "circledast;": '\U0000229B', - "circledcirc;": '\U0000229A', - "circleddash;": '\U0000229D', - "cire;": '\U00002257', - "cirfnint;": '\U00002A10', - "cirmid;": '\U00002AEF', - "cirscir;": '\U000029C2', - "clubs;": '\U00002663', - "clubsuit;": '\U00002663', - "colon;": '\U0000003A', - "colone;": '\U00002254', - "coloneq;": '\U00002254', - "comma;": '\U0000002C', - "commat;": '\U00000040', - "comp;": '\U00002201', - "compfn;": '\U00002218', - "complement;": '\U00002201', - "complexes;": '\U00002102', - "cong;": '\U00002245', - "congdot;": '\U00002A6D', - "conint;": '\U0000222E', - "copf;": '\U0001D554', - "coprod;": '\U00002210', - "copy;": '\U000000A9', - "copysr;": '\U00002117', - "crarr;": '\U000021B5', - "cross;": '\U00002717', - "cscr;": '\U0001D4B8', - "csub;": '\U00002ACF', - "csube;": '\U00002AD1', - "csup;": '\U00002AD0', - "csupe;": '\U00002AD2', - "ctdot;": '\U000022EF', - "cudarrl;": '\U00002938', - "cudarrr;": '\U00002935', - "cuepr;": '\U000022DE', - "cuesc;": '\U000022DF', - "cularr;": '\U000021B6', - "cularrp;": '\U0000293D', - "cup;": '\U0000222A', - "cupbrcap;": '\U00002A48', - "cupcap;": '\U00002A46', - "cupcup;": '\U00002A4A', - "cupdot;": '\U0000228D', - "cupor;": '\U00002A45', - "curarr;": '\U000021B7', - "curarrm;": 
'\U0000293C', - "curlyeqprec;": '\U000022DE', - "curlyeqsucc;": '\U000022DF', - "curlyvee;": '\U000022CE', - "curlywedge;": '\U000022CF', - "curren;": '\U000000A4', - "curvearrowleft;": '\U000021B6', - "curvearrowright;": '\U000021B7', - "cuvee;": '\U000022CE', - "cuwed;": '\U000022CF', - "cwconint;": '\U00002232', - "cwint;": '\U00002231', - "cylcty;": '\U0000232D', - "dArr;": '\U000021D3', - "dHar;": '\U00002965', - "dagger;": '\U00002020', - "daleth;": '\U00002138', - "darr;": '\U00002193', - "dash;": '\U00002010', - "dashv;": '\U000022A3', - "dbkarow;": '\U0000290F', - "dblac;": '\U000002DD', - "dcaron;": '\U0000010F', - "dcy;": '\U00000434', - "dd;": '\U00002146', - "ddagger;": '\U00002021', - "ddarr;": '\U000021CA', - "ddotseq;": '\U00002A77', - "deg;": '\U000000B0', - "delta;": '\U000003B4', - "demptyv;": '\U000029B1', - "dfisht;": '\U0000297F', - "dfr;": '\U0001D521', - "dharl;": '\U000021C3', - "dharr;": '\U000021C2', - "diam;": '\U000022C4', - "diamond;": '\U000022C4', - "diamondsuit;": '\U00002666', - "diams;": '\U00002666', - "die;": '\U000000A8', - "digamma;": '\U000003DD', - "disin;": '\U000022F2', - "div;": '\U000000F7', - "divide;": '\U000000F7', - "divideontimes;": '\U000022C7', - "divonx;": '\U000022C7', - "djcy;": '\U00000452', - "dlcorn;": '\U0000231E', - "dlcrop;": '\U0000230D', - "dollar;": '\U00000024', - "dopf;": '\U0001D555', - "dot;": '\U000002D9', - "doteq;": '\U00002250', - "doteqdot;": '\U00002251', - "dotminus;": '\U00002238', - "dotplus;": '\U00002214', - "dotsquare;": '\U000022A1', - "doublebarwedge;": '\U00002306', - "downarrow;": '\U00002193', - "downdownarrows;": '\U000021CA', - "downharpoonleft;": '\U000021C3', - "downharpoonright;": '\U000021C2', - "drbkarow;": '\U00002910', - "drcorn;": '\U0000231F', - "drcrop;": '\U0000230C', - "dscr;": '\U0001D4B9', - "dscy;": '\U00000455', - "dsol;": '\U000029F6', - "dstrok;": '\U00000111', - "dtdot;": '\U000022F1', - "dtri;": '\U000025BF', - "dtrif;": '\U000025BE', - "duarr;": '\U000021F5', 
- "duhar;": '\U0000296F', - "dwangle;": '\U000029A6', - "dzcy;": '\U0000045F', - "dzigrarr;": '\U000027FF', - "eDDot;": '\U00002A77', - "eDot;": '\U00002251', - "eacute;": '\U000000E9', - "easter;": '\U00002A6E', - "ecaron;": '\U0000011B', - "ecir;": '\U00002256', - "ecirc;": '\U000000EA', - "ecolon;": '\U00002255', - "ecy;": '\U0000044D', - "edot;": '\U00000117', - "ee;": '\U00002147', - "efDot;": '\U00002252', - "efr;": '\U0001D522', - "eg;": '\U00002A9A', - "egrave;": '\U000000E8', - "egs;": '\U00002A96', - "egsdot;": '\U00002A98', - "el;": '\U00002A99', - "elinters;": '\U000023E7', - "ell;": '\U00002113', - "els;": '\U00002A95', - "elsdot;": '\U00002A97', - "emacr;": '\U00000113', - "empty;": '\U00002205', - "emptyset;": '\U00002205', - "emptyv;": '\U00002205', - "emsp;": '\U00002003', - "emsp13;": '\U00002004', - "emsp14;": '\U00002005', - "eng;": '\U0000014B', - "ensp;": '\U00002002', - "eogon;": '\U00000119', - "eopf;": '\U0001D556', - "epar;": '\U000022D5', - "eparsl;": '\U000029E3', - "eplus;": '\U00002A71', - "epsi;": '\U000003B5', - "epsilon;": '\U000003B5', - "epsiv;": '\U000003F5', - "eqcirc;": '\U00002256', - "eqcolon;": '\U00002255', - "eqsim;": '\U00002242', - "eqslantgtr;": '\U00002A96', - "eqslantless;": '\U00002A95', - "equals;": '\U0000003D', - "equest;": '\U0000225F', - "equiv;": '\U00002261', - "equivDD;": '\U00002A78', - "eqvparsl;": '\U000029E5', - "erDot;": '\U00002253', - "erarr;": '\U00002971', - "escr;": '\U0000212F', - "esdot;": '\U00002250', - "esim;": '\U00002242', - "eta;": '\U000003B7', - "eth;": '\U000000F0', - "euml;": '\U000000EB', - "euro;": '\U000020AC', - "excl;": '\U00000021', - "exist;": '\U00002203', - "expectation;": '\U00002130', - "exponentiale;": '\U00002147', - "fallingdotseq;": '\U00002252', - "fcy;": '\U00000444', - "female;": '\U00002640', - "ffilig;": '\U0000FB03', - "fflig;": '\U0000FB00', - "ffllig;": '\U0000FB04', - "ffr;": '\U0001D523', - "filig;": '\U0000FB01', - "flat;": '\U0000266D', - "fllig;": 
'\U0000FB02', - "fltns;": '\U000025B1', - "fnof;": '\U00000192', - "fopf;": '\U0001D557', - "forall;": '\U00002200', - "fork;": '\U000022D4', - "forkv;": '\U00002AD9', - "fpartint;": '\U00002A0D', - "frac12;": '\U000000BD', - "frac13;": '\U00002153', - "frac14;": '\U000000BC', - "frac15;": '\U00002155', - "frac16;": '\U00002159', - "frac18;": '\U0000215B', - "frac23;": '\U00002154', - "frac25;": '\U00002156', - "frac34;": '\U000000BE', - "frac35;": '\U00002157', - "frac38;": '\U0000215C', - "frac45;": '\U00002158', - "frac56;": '\U0000215A', - "frac58;": '\U0000215D', - "frac78;": '\U0000215E', - "frasl;": '\U00002044', - "frown;": '\U00002322', - "fscr;": '\U0001D4BB', - "gE;": '\U00002267', - "gEl;": '\U00002A8C', - "gacute;": '\U000001F5', - "gamma;": '\U000003B3', - "gammad;": '\U000003DD', - "gap;": '\U00002A86', - "gbreve;": '\U0000011F', - "gcirc;": '\U0000011D', - "gcy;": '\U00000433', - "gdot;": '\U00000121', - "ge;": '\U00002265', - "gel;": '\U000022DB', - "geq;": '\U00002265', - "geqq;": '\U00002267', - "geqslant;": '\U00002A7E', - "ges;": '\U00002A7E', - "gescc;": '\U00002AA9', - "gesdot;": '\U00002A80', - "gesdoto;": '\U00002A82', - "gesdotol;": '\U00002A84', - "gesles;": '\U00002A94', - "gfr;": '\U0001D524', - "gg;": '\U0000226B', - "ggg;": '\U000022D9', - "gimel;": '\U00002137', - "gjcy;": '\U00000453', - "gl;": '\U00002277', - "glE;": '\U00002A92', - "gla;": '\U00002AA5', - "glj;": '\U00002AA4', - "gnE;": '\U00002269', - "gnap;": '\U00002A8A', - "gnapprox;": '\U00002A8A', - "gne;": '\U00002A88', - "gneq;": '\U00002A88', - "gneqq;": '\U00002269', - "gnsim;": '\U000022E7', - "gopf;": '\U0001D558', - "grave;": '\U00000060', - "gscr;": '\U0000210A', - "gsim;": '\U00002273', - "gsime;": '\U00002A8E', - "gsiml;": '\U00002A90', - "gt;": '\U0000003E', - "gtcc;": '\U00002AA7', - "gtcir;": '\U00002A7A', - "gtdot;": '\U000022D7', - "gtlPar;": '\U00002995', - "gtquest;": '\U00002A7C', - "gtrapprox;": '\U00002A86', - "gtrarr;": '\U00002978', - "gtrdot;": 
'\U000022D7', - "gtreqless;": '\U000022DB', - "gtreqqless;": '\U00002A8C', - "gtrless;": '\U00002277', - "gtrsim;": '\U00002273', - "hArr;": '\U000021D4', - "hairsp;": '\U0000200A', - "half;": '\U000000BD', - "hamilt;": '\U0000210B', - "hardcy;": '\U0000044A', - "harr;": '\U00002194', - "harrcir;": '\U00002948', - "harrw;": '\U000021AD', - "hbar;": '\U0000210F', - "hcirc;": '\U00000125', - "hearts;": '\U00002665', - "heartsuit;": '\U00002665', - "hellip;": '\U00002026', - "hercon;": '\U000022B9', - "hfr;": '\U0001D525', - "hksearow;": '\U00002925', - "hkswarow;": '\U00002926', - "hoarr;": '\U000021FF', - "homtht;": '\U0000223B', - "hookleftarrow;": '\U000021A9', - "hookrightarrow;": '\U000021AA', - "hopf;": '\U0001D559', - "horbar;": '\U00002015', - "hscr;": '\U0001D4BD', - "hslash;": '\U0000210F', - "hstrok;": '\U00000127', - "hybull;": '\U00002043', - "hyphen;": '\U00002010', - "iacute;": '\U000000ED', - "ic;": '\U00002063', - "icirc;": '\U000000EE', - "icy;": '\U00000438', - "iecy;": '\U00000435', - "iexcl;": '\U000000A1', - "iff;": '\U000021D4', - "ifr;": '\U0001D526', - "igrave;": '\U000000EC', - "ii;": '\U00002148', - "iiiint;": '\U00002A0C', - "iiint;": '\U0000222D', - "iinfin;": '\U000029DC', - "iiota;": '\U00002129', - "ijlig;": '\U00000133', - "imacr;": '\U0000012B', - "image;": '\U00002111', - "imagline;": '\U00002110', - "imagpart;": '\U00002111', - "imath;": '\U00000131', - "imof;": '\U000022B7', - "imped;": '\U000001B5', - "in;": '\U00002208', - "incare;": '\U00002105', - "infin;": '\U0000221E', - "infintie;": '\U000029DD', - "inodot;": '\U00000131', - "int;": '\U0000222B', - "intcal;": '\U000022BA', - "integers;": '\U00002124', - "intercal;": '\U000022BA', - "intlarhk;": '\U00002A17', - "intprod;": '\U00002A3C', - "iocy;": '\U00000451', - "iogon;": '\U0000012F', - "iopf;": '\U0001D55A', - "iota;": '\U000003B9', - "iprod;": '\U00002A3C', - "iquest;": '\U000000BF', - "iscr;": '\U0001D4BE', - "isin;": '\U00002208', - "isinE;": '\U000022F9', - 
"isindot;": '\U000022F5', - "isins;": '\U000022F4', - "isinsv;": '\U000022F3', - "isinv;": '\U00002208', - "it;": '\U00002062', - "itilde;": '\U00000129', - "iukcy;": '\U00000456', - "iuml;": '\U000000EF', - "jcirc;": '\U00000135', - "jcy;": '\U00000439', - "jfr;": '\U0001D527', - "jmath;": '\U00000237', - "jopf;": '\U0001D55B', - "jscr;": '\U0001D4BF', - "jsercy;": '\U00000458', - "jukcy;": '\U00000454', - "kappa;": '\U000003BA', - "kappav;": '\U000003F0', - "kcedil;": '\U00000137', - "kcy;": '\U0000043A', - "kfr;": '\U0001D528', - "kgreen;": '\U00000138', - "khcy;": '\U00000445', - "kjcy;": '\U0000045C', - "kopf;": '\U0001D55C', - "kscr;": '\U0001D4C0', - "lAarr;": '\U000021DA', - "lArr;": '\U000021D0', - "lAtail;": '\U0000291B', - "lBarr;": '\U0000290E', - "lE;": '\U00002266', - "lEg;": '\U00002A8B', - "lHar;": '\U00002962', - "lacute;": '\U0000013A', - "laemptyv;": '\U000029B4', - "lagran;": '\U00002112', - "lambda;": '\U000003BB', - "lang;": '\U000027E8', - "langd;": '\U00002991', - "langle;": '\U000027E8', - "lap;": '\U00002A85', - "laquo;": '\U000000AB', - "larr;": '\U00002190', - "larrb;": '\U000021E4', - "larrbfs;": '\U0000291F', - "larrfs;": '\U0000291D', - "larrhk;": '\U000021A9', - "larrlp;": '\U000021AB', - "larrpl;": '\U00002939', - "larrsim;": '\U00002973', - "larrtl;": '\U000021A2', - "lat;": '\U00002AAB', - "latail;": '\U00002919', - "late;": '\U00002AAD', - "lbarr;": '\U0000290C', - "lbbrk;": '\U00002772', - "lbrace;": '\U0000007B', - "lbrack;": '\U0000005B', - "lbrke;": '\U0000298B', - "lbrksld;": '\U0000298F', - "lbrkslu;": '\U0000298D', - "lcaron;": '\U0000013E', - "lcedil;": '\U0000013C', - "lceil;": '\U00002308', - "lcub;": '\U0000007B', - "lcy;": '\U0000043B', - "ldca;": '\U00002936', - "ldquo;": '\U0000201C', - "ldquor;": '\U0000201E', - "ldrdhar;": '\U00002967', - "ldrushar;": '\U0000294B', - "ldsh;": '\U000021B2', - "le;": '\U00002264', - "leftarrow;": '\U00002190', - "leftarrowtail;": '\U000021A2', - "leftharpoondown;": '\U000021BD', - 
"leftharpoonup;": '\U000021BC', - "leftleftarrows;": '\U000021C7', - "leftrightarrow;": '\U00002194', - "leftrightarrows;": '\U000021C6', - "leftrightharpoons;": '\U000021CB', - "leftrightsquigarrow;": '\U000021AD', - "leftthreetimes;": '\U000022CB', - "leg;": '\U000022DA', - "leq;": '\U00002264', - "leqq;": '\U00002266', - "leqslant;": '\U00002A7D', - "les;": '\U00002A7D', - "lescc;": '\U00002AA8', - "lesdot;": '\U00002A7F', - "lesdoto;": '\U00002A81', - "lesdotor;": '\U00002A83', - "lesges;": '\U00002A93', - "lessapprox;": '\U00002A85', - "lessdot;": '\U000022D6', - "lesseqgtr;": '\U000022DA', - "lesseqqgtr;": '\U00002A8B', - "lessgtr;": '\U00002276', - "lesssim;": '\U00002272', - "lfisht;": '\U0000297C', - "lfloor;": '\U0000230A', - "lfr;": '\U0001D529', - "lg;": '\U00002276', - "lgE;": '\U00002A91', - "lhard;": '\U000021BD', - "lharu;": '\U000021BC', - "lharul;": '\U0000296A', - "lhblk;": '\U00002584', - "ljcy;": '\U00000459', - "ll;": '\U0000226A', - "llarr;": '\U000021C7', - "llcorner;": '\U0000231E', - "llhard;": '\U0000296B', - "lltri;": '\U000025FA', - "lmidot;": '\U00000140', - "lmoust;": '\U000023B0', - "lmoustache;": '\U000023B0', - "lnE;": '\U00002268', - "lnap;": '\U00002A89', - "lnapprox;": '\U00002A89', - "lne;": '\U00002A87', - "lneq;": '\U00002A87', - "lneqq;": '\U00002268', - "lnsim;": '\U000022E6', - "loang;": '\U000027EC', - "loarr;": '\U000021FD', - "lobrk;": '\U000027E6', - "longleftarrow;": '\U000027F5', - "longleftrightarrow;": '\U000027F7', - "longmapsto;": '\U000027FC', - "longrightarrow;": '\U000027F6', - "looparrowleft;": '\U000021AB', - "looparrowright;": '\U000021AC', - "lopar;": '\U00002985', - "lopf;": '\U0001D55D', - "loplus;": '\U00002A2D', - "lotimes;": '\U00002A34', - "lowast;": '\U00002217', - "lowbar;": '\U0000005F', - "loz;": '\U000025CA', - "lozenge;": '\U000025CA', - "lozf;": '\U000029EB', - "lpar;": '\U00000028', - "lparlt;": '\U00002993', - "lrarr;": '\U000021C6', - "lrcorner;": '\U0000231F', - "lrhar;": '\U000021CB', - 
"lrhard;": '\U0000296D', - "lrm;": '\U0000200E', - "lrtri;": '\U000022BF', - "lsaquo;": '\U00002039', - "lscr;": '\U0001D4C1', - "lsh;": '\U000021B0', - "lsim;": '\U00002272', - "lsime;": '\U00002A8D', - "lsimg;": '\U00002A8F', - "lsqb;": '\U0000005B', - "lsquo;": '\U00002018', - "lsquor;": '\U0000201A', - "lstrok;": '\U00000142', - "lt;": '\U0000003C', - "ltcc;": '\U00002AA6', - "ltcir;": '\U00002A79', - "ltdot;": '\U000022D6', - "lthree;": '\U000022CB', - "ltimes;": '\U000022C9', - "ltlarr;": '\U00002976', - "ltquest;": '\U00002A7B', - "ltrPar;": '\U00002996', - "ltri;": '\U000025C3', - "ltrie;": '\U000022B4', - "ltrif;": '\U000025C2', - "lurdshar;": '\U0000294A', - "luruhar;": '\U00002966', - "mDDot;": '\U0000223A', - "macr;": '\U000000AF', - "male;": '\U00002642', - "malt;": '\U00002720', - "maltese;": '\U00002720', - "map;": '\U000021A6', - "mapsto;": '\U000021A6', - "mapstodown;": '\U000021A7', - "mapstoleft;": '\U000021A4', - "mapstoup;": '\U000021A5', - "marker;": '\U000025AE', - "mcomma;": '\U00002A29', - "mcy;": '\U0000043C', - "mdash;": '\U00002014', - "measuredangle;": '\U00002221', - "mfr;": '\U0001D52A', - "mho;": '\U00002127', - "micro;": '\U000000B5', - "mid;": '\U00002223', - "midast;": '\U0000002A', - "midcir;": '\U00002AF0', - "middot;": '\U000000B7', - "minus;": '\U00002212', - "minusb;": '\U0000229F', - "minusd;": '\U00002238', - "minusdu;": '\U00002A2A', - "mlcp;": '\U00002ADB', - "mldr;": '\U00002026', - "mnplus;": '\U00002213', - "models;": '\U000022A7', - "mopf;": '\U0001D55E', - "mp;": '\U00002213', - "mscr;": '\U0001D4C2', - "mstpos;": '\U0000223E', - "mu;": '\U000003BC', - "multimap;": '\U000022B8', - "mumap;": '\U000022B8', - "nLeftarrow;": '\U000021CD', - "nLeftrightarrow;": '\U000021CE', - "nRightarrow;": '\U000021CF', - "nVDash;": '\U000022AF', - "nVdash;": '\U000022AE', - "nabla;": '\U00002207', - "nacute;": '\U00000144', - "nap;": '\U00002249', - "napos;": '\U00000149', - "napprox;": '\U00002249', - "natur;": '\U0000266E', - 
"natural;": '\U0000266E', - "naturals;": '\U00002115', - "nbsp;": '\U000000A0', - "ncap;": '\U00002A43', - "ncaron;": '\U00000148', - "ncedil;": '\U00000146', - "ncong;": '\U00002247', - "ncup;": '\U00002A42', - "ncy;": '\U0000043D', - "ndash;": '\U00002013', - "ne;": '\U00002260', - "neArr;": '\U000021D7', - "nearhk;": '\U00002924', - "nearr;": '\U00002197', - "nearrow;": '\U00002197', - "nequiv;": '\U00002262', - "nesear;": '\U00002928', - "nexist;": '\U00002204', - "nexists;": '\U00002204', - "nfr;": '\U0001D52B', - "nge;": '\U00002271', - "ngeq;": '\U00002271', - "ngsim;": '\U00002275', - "ngt;": '\U0000226F', - "ngtr;": '\U0000226F', - "nhArr;": '\U000021CE', - "nharr;": '\U000021AE', - "nhpar;": '\U00002AF2', - "ni;": '\U0000220B', - "nis;": '\U000022FC', - "nisd;": '\U000022FA', - "niv;": '\U0000220B', - "njcy;": '\U0000045A', - "nlArr;": '\U000021CD', - "nlarr;": '\U0000219A', - "nldr;": '\U00002025', - "nle;": '\U00002270', - "nleftarrow;": '\U0000219A', - "nleftrightarrow;": '\U000021AE', - "nleq;": '\U00002270', - "nless;": '\U0000226E', - "nlsim;": '\U00002274', - "nlt;": '\U0000226E', - "nltri;": '\U000022EA', - "nltrie;": '\U000022EC', - "nmid;": '\U00002224', - "nopf;": '\U0001D55F', - "not;": '\U000000AC', - "notin;": '\U00002209', - "notinva;": '\U00002209', - "notinvb;": '\U000022F7', - "notinvc;": '\U000022F6', - "notni;": '\U0000220C', - "notniva;": '\U0000220C', - "notnivb;": '\U000022FE', - "notnivc;": '\U000022FD', - "npar;": '\U00002226', - "nparallel;": '\U00002226', - "npolint;": '\U00002A14', - "npr;": '\U00002280', - "nprcue;": '\U000022E0', - "nprec;": '\U00002280', - "nrArr;": '\U000021CF', - "nrarr;": '\U0000219B', - "nrightarrow;": '\U0000219B', - "nrtri;": '\U000022EB', - "nrtrie;": '\U000022ED', - "nsc;": '\U00002281', - "nsccue;": '\U000022E1', - "nscr;": '\U0001D4C3', - "nshortmid;": '\U00002224', - "nshortparallel;": '\U00002226', - "nsim;": '\U00002241', - "nsime;": '\U00002244', - "nsimeq;": '\U00002244', - "nsmid;": 
'\U00002224', - "nspar;": '\U00002226', - "nsqsube;": '\U000022E2', - "nsqsupe;": '\U000022E3', - "nsub;": '\U00002284', - "nsube;": '\U00002288', - "nsubseteq;": '\U00002288', - "nsucc;": '\U00002281', - "nsup;": '\U00002285', - "nsupe;": '\U00002289', - "nsupseteq;": '\U00002289', - "ntgl;": '\U00002279', - "ntilde;": '\U000000F1', - "ntlg;": '\U00002278', - "ntriangleleft;": '\U000022EA', - "ntrianglelefteq;": '\U000022EC', - "ntriangleright;": '\U000022EB', - "ntrianglerighteq;": '\U000022ED', - "nu;": '\U000003BD', - "num;": '\U00000023', - "numero;": '\U00002116', - "numsp;": '\U00002007', - "nvDash;": '\U000022AD', - "nvHarr;": '\U00002904', - "nvdash;": '\U000022AC', - "nvinfin;": '\U000029DE', - "nvlArr;": '\U00002902', - "nvrArr;": '\U00002903', - "nwArr;": '\U000021D6', - "nwarhk;": '\U00002923', - "nwarr;": '\U00002196', - "nwarrow;": '\U00002196', - "nwnear;": '\U00002927', - "oS;": '\U000024C8', - "oacute;": '\U000000F3', - "oast;": '\U0000229B', - "ocir;": '\U0000229A', - "ocirc;": '\U000000F4', - "ocy;": '\U0000043E', - "odash;": '\U0000229D', - "odblac;": '\U00000151', - "odiv;": '\U00002A38', - "odot;": '\U00002299', - "odsold;": '\U000029BC', - "oelig;": '\U00000153', - "ofcir;": '\U000029BF', - "ofr;": '\U0001D52C', - "ogon;": '\U000002DB', - "ograve;": '\U000000F2', - "ogt;": '\U000029C1', - "ohbar;": '\U000029B5', - "ohm;": '\U000003A9', - "oint;": '\U0000222E', - "olarr;": '\U000021BA', - "olcir;": '\U000029BE', - "olcross;": '\U000029BB', - "oline;": '\U0000203E', - "olt;": '\U000029C0', - "omacr;": '\U0000014D', - "omega;": '\U000003C9', - "omicron;": '\U000003BF', - "omid;": '\U000029B6', - "ominus;": '\U00002296', - "oopf;": '\U0001D560', - "opar;": '\U000029B7', - "operp;": '\U000029B9', - "oplus;": '\U00002295', - "or;": '\U00002228', - "orarr;": '\U000021BB', - "ord;": '\U00002A5D', - "order;": '\U00002134', - "orderof;": '\U00002134', - "ordf;": '\U000000AA', - "ordm;": '\U000000BA', - "origof;": '\U000022B6', - "oror;": '\U00002A56', 
- "orslope;": '\U00002A57', - "orv;": '\U00002A5B', - "oscr;": '\U00002134', - "oslash;": '\U000000F8', - "osol;": '\U00002298', - "otilde;": '\U000000F5', - "otimes;": '\U00002297', - "otimesas;": '\U00002A36', - "ouml;": '\U000000F6', - "ovbar;": '\U0000233D', - "par;": '\U00002225', - "para;": '\U000000B6', - "parallel;": '\U00002225', - "parsim;": '\U00002AF3', - "parsl;": '\U00002AFD', - "part;": '\U00002202', - "pcy;": '\U0000043F', - "percnt;": '\U00000025', - "period;": '\U0000002E', - "permil;": '\U00002030', - "perp;": '\U000022A5', - "pertenk;": '\U00002031', - "pfr;": '\U0001D52D', - "phi;": '\U000003C6', - "phiv;": '\U000003D5', - "phmmat;": '\U00002133', - "phone;": '\U0000260E', - "pi;": '\U000003C0', - "pitchfork;": '\U000022D4', - "piv;": '\U000003D6', - "planck;": '\U0000210F', - "planckh;": '\U0000210E', - "plankv;": '\U0000210F', - "plus;": '\U0000002B', - "plusacir;": '\U00002A23', - "plusb;": '\U0000229E', - "pluscir;": '\U00002A22', - "plusdo;": '\U00002214', - "plusdu;": '\U00002A25', - "pluse;": '\U00002A72', - "plusmn;": '\U000000B1', - "plussim;": '\U00002A26', - "plustwo;": '\U00002A27', - "pm;": '\U000000B1', - "pointint;": '\U00002A15', - "popf;": '\U0001D561', - "pound;": '\U000000A3', - "pr;": '\U0000227A', - "prE;": '\U00002AB3', - "prap;": '\U00002AB7', - "prcue;": '\U0000227C', - "pre;": '\U00002AAF', - "prec;": '\U0000227A', - "precapprox;": '\U00002AB7', - "preccurlyeq;": '\U0000227C', - "preceq;": '\U00002AAF', - "precnapprox;": '\U00002AB9', - "precneqq;": '\U00002AB5', - "precnsim;": '\U000022E8', - "precsim;": '\U0000227E', - "prime;": '\U00002032', - "primes;": '\U00002119', - "prnE;": '\U00002AB5', - "prnap;": '\U00002AB9', - "prnsim;": '\U000022E8', - "prod;": '\U0000220F', - "profalar;": '\U0000232E', - "profline;": '\U00002312', - "profsurf;": '\U00002313', - "prop;": '\U0000221D', - "propto;": '\U0000221D', - "prsim;": '\U0000227E', - "prurel;": '\U000022B0', - "pscr;": '\U0001D4C5', - "psi;": '\U000003C8', - 
"puncsp;": '\U00002008', - "qfr;": '\U0001D52E', - "qint;": '\U00002A0C', - "qopf;": '\U0001D562', - "qprime;": '\U00002057', - "qscr;": '\U0001D4C6', - "quaternions;": '\U0000210D', - "quatint;": '\U00002A16', - "quest;": '\U0000003F', - "questeq;": '\U0000225F', - "quot;": '\U00000022', - "rAarr;": '\U000021DB', - "rArr;": '\U000021D2', - "rAtail;": '\U0000291C', - "rBarr;": '\U0000290F', - "rHar;": '\U00002964', - "racute;": '\U00000155', - "radic;": '\U0000221A', - "raemptyv;": '\U000029B3', - "rang;": '\U000027E9', - "rangd;": '\U00002992', - "range;": '\U000029A5', - "rangle;": '\U000027E9', - "raquo;": '\U000000BB', - "rarr;": '\U00002192', - "rarrap;": '\U00002975', - "rarrb;": '\U000021E5', - "rarrbfs;": '\U00002920', - "rarrc;": '\U00002933', - "rarrfs;": '\U0000291E', - "rarrhk;": '\U000021AA', - "rarrlp;": '\U000021AC', - "rarrpl;": '\U00002945', - "rarrsim;": '\U00002974', - "rarrtl;": '\U000021A3', - "rarrw;": '\U0000219D', - "ratail;": '\U0000291A', - "ratio;": '\U00002236', - "rationals;": '\U0000211A', - "rbarr;": '\U0000290D', - "rbbrk;": '\U00002773', - "rbrace;": '\U0000007D', - "rbrack;": '\U0000005D', - "rbrke;": '\U0000298C', - "rbrksld;": '\U0000298E', - "rbrkslu;": '\U00002990', - "rcaron;": '\U00000159', - "rcedil;": '\U00000157', - "rceil;": '\U00002309', - "rcub;": '\U0000007D', - "rcy;": '\U00000440', - "rdca;": '\U00002937', - "rdldhar;": '\U00002969', - "rdquo;": '\U0000201D', - "rdquor;": '\U0000201D', - "rdsh;": '\U000021B3', - "real;": '\U0000211C', - "realine;": '\U0000211B', - "realpart;": '\U0000211C', - "reals;": '\U0000211D', - "rect;": '\U000025AD', - "reg;": '\U000000AE', - "rfisht;": '\U0000297D', - "rfloor;": '\U0000230B', - "rfr;": '\U0001D52F', - "rhard;": '\U000021C1', - "rharu;": '\U000021C0', - "rharul;": '\U0000296C', - "rho;": '\U000003C1', - "rhov;": '\U000003F1', - "rightarrow;": '\U00002192', - "rightarrowtail;": '\U000021A3', - "rightharpoondown;": '\U000021C1', - "rightharpoonup;": '\U000021C0', - 
"rightleftarrows;": '\U000021C4', - "rightleftharpoons;": '\U000021CC', - "rightrightarrows;": '\U000021C9', - "rightsquigarrow;": '\U0000219D', - "rightthreetimes;": '\U000022CC', - "ring;": '\U000002DA', - "risingdotseq;": '\U00002253', - "rlarr;": '\U000021C4', - "rlhar;": '\U000021CC', - "rlm;": '\U0000200F', - "rmoust;": '\U000023B1', - "rmoustache;": '\U000023B1', - "rnmid;": '\U00002AEE', - "roang;": '\U000027ED', - "roarr;": '\U000021FE', - "robrk;": '\U000027E7', - "ropar;": '\U00002986', - "ropf;": '\U0001D563', - "roplus;": '\U00002A2E', - "rotimes;": '\U00002A35', - "rpar;": '\U00000029', - "rpargt;": '\U00002994', - "rppolint;": '\U00002A12', - "rrarr;": '\U000021C9', - "rsaquo;": '\U0000203A', - "rscr;": '\U0001D4C7', - "rsh;": '\U000021B1', - "rsqb;": '\U0000005D', - "rsquo;": '\U00002019', - "rsquor;": '\U00002019', - "rthree;": '\U000022CC', - "rtimes;": '\U000022CA', - "rtri;": '\U000025B9', - "rtrie;": '\U000022B5', - "rtrif;": '\U000025B8', - "rtriltri;": '\U000029CE', - "ruluhar;": '\U00002968', - "rx;": '\U0000211E', - "sacute;": '\U0000015B', - "sbquo;": '\U0000201A', - "sc;": '\U0000227B', - "scE;": '\U00002AB4', - "scap;": '\U00002AB8', - "scaron;": '\U00000161', - "sccue;": '\U0000227D', - "sce;": '\U00002AB0', - "scedil;": '\U0000015F', - "scirc;": '\U0000015D', - "scnE;": '\U00002AB6', - "scnap;": '\U00002ABA', - "scnsim;": '\U000022E9', - "scpolint;": '\U00002A13', - "scsim;": '\U0000227F', - "scy;": '\U00000441', - "sdot;": '\U000022C5', - "sdotb;": '\U000022A1', - "sdote;": '\U00002A66', - "seArr;": '\U000021D8', - "searhk;": '\U00002925', - "searr;": '\U00002198', - "searrow;": '\U00002198', - "sect;": '\U000000A7', - "semi;": '\U0000003B', - "seswar;": '\U00002929', - "setminus;": '\U00002216', - "setmn;": '\U00002216', - "sext;": '\U00002736', - "sfr;": '\U0001D530', - "sfrown;": '\U00002322', - "sharp;": '\U0000266F', - "shchcy;": '\U00000449', - "shcy;": '\U00000448', - "shortmid;": '\U00002223', - "shortparallel;": '\U00002225', 
- "shy;": '\U000000AD', - "sigma;": '\U000003C3', - "sigmaf;": '\U000003C2', - "sigmav;": '\U000003C2', - "sim;": '\U0000223C', - "simdot;": '\U00002A6A', - "sime;": '\U00002243', - "simeq;": '\U00002243', - "simg;": '\U00002A9E', - "simgE;": '\U00002AA0', - "siml;": '\U00002A9D', - "simlE;": '\U00002A9F', - "simne;": '\U00002246', - "simplus;": '\U00002A24', - "simrarr;": '\U00002972', - "slarr;": '\U00002190', - "smallsetminus;": '\U00002216', - "smashp;": '\U00002A33', - "smeparsl;": '\U000029E4', - "smid;": '\U00002223', - "smile;": '\U00002323', - "smt;": '\U00002AAA', - "smte;": '\U00002AAC', - "softcy;": '\U0000044C', - "sol;": '\U0000002F', - "solb;": '\U000029C4', - "solbar;": '\U0000233F', - "sopf;": '\U0001D564', - "spades;": '\U00002660', - "spadesuit;": '\U00002660', - "spar;": '\U00002225', - "sqcap;": '\U00002293', - "sqcup;": '\U00002294', - "sqsub;": '\U0000228F', - "sqsube;": '\U00002291', - "sqsubset;": '\U0000228F', - "sqsubseteq;": '\U00002291', - "sqsup;": '\U00002290', - "sqsupe;": '\U00002292', - "sqsupset;": '\U00002290', - "sqsupseteq;": '\U00002292', - "squ;": '\U000025A1', - "square;": '\U000025A1', - "squarf;": '\U000025AA', - "squf;": '\U000025AA', - "srarr;": '\U00002192', - "sscr;": '\U0001D4C8', - "ssetmn;": '\U00002216', - "ssmile;": '\U00002323', - "sstarf;": '\U000022C6', - "star;": '\U00002606', - "starf;": '\U00002605', - "straightepsilon;": '\U000003F5', - "straightphi;": '\U000003D5', - "strns;": '\U000000AF', - "sub;": '\U00002282', - "subE;": '\U00002AC5', - "subdot;": '\U00002ABD', - "sube;": '\U00002286', - "subedot;": '\U00002AC3', - "submult;": '\U00002AC1', - "subnE;": '\U00002ACB', - "subne;": '\U0000228A', - "subplus;": '\U00002ABF', - "subrarr;": '\U00002979', - "subset;": '\U00002282', - "subseteq;": '\U00002286', - "subseteqq;": '\U00002AC5', - "subsetneq;": '\U0000228A', - "subsetneqq;": '\U00002ACB', - "subsim;": '\U00002AC7', - "subsub;": '\U00002AD5', - "subsup;": '\U00002AD3', - "succ;": '\U0000227B', - 
"succapprox;": '\U00002AB8', - "succcurlyeq;": '\U0000227D', - "succeq;": '\U00002AB0', - "succnapprox;": '\U00002ABA', - "succneqq;": '\U00002AB6', - "succnsim;": '\U000022E9', - "succsim;": '\U0000227F', - "sum;": '\U00002211', - "sung;": '\U0000266A', - "sup;": '\U00002283', - "sup1;": '\U000000B9', - "sup2;": '\U000000B2', - "sup3;": '\U000000B3', - "supE;": '\U00002AC6', - "supdot;": '\U00002ABE', - "supdsub;": '\U00002AD8', - "supe;": '\U00002287', - "supedot;": '\U00002AC4', - "suphsol;": '\U000027C9', - "suphsub;": '\U00002AD7', - "suplarr;": '\U0000297B', - "supmult;": '\U00002AC2', - "supnE;": '\U00002ACC', - "supne;": '\U0000228B', - "supplus;": '\U00002AC0', - "supset;": '\U00002283', - "supseteq;": '\U00002287', - "supseteqq;": '\U00002AC6', - "supsetneq;": '\U0000228B', - "supsetneqq;": '\U00002ACC', - "supsim;": '\U00002AC8', - "supsub;": '\U00002AD4', - "supsup;": '\U00002AD6', - "swArr;": '\U000021D9', - "swarhk;": '\U00002926', - "swarr;": '\U00002199', - "swarrow;": '\U00002199', - "swnwar;": '\U0000292A', - "szlig;": '\U000000DF', - "target;": '\U00002316', - "tau;": '\U000003C4', - "tbrk;": '\U000023B4', - "tcaron;": '\U00000165', - "tcedil;": '\U00000163', - "tcy;": '\U00000442', - "tdot;": '\U000020DB', - "telrec;": '\U00002315', - "tfr;": '\U0001D531', - "there4;": '\U00002234', - "therefore;": '\U00002234', - "theta;": '\U000003B8', - "thetasym;": '\U000003D1', - "thetav;": '\U000003D1', - "thickapprox;": '\U00002248', - "thicksim;": '\U0000223C', - "thinsp;": '\U00002009', - "thkap;": '\U00002248', - "thksim;": '\U0000223C', - "thorn;": '\U000000FE', - "tilde;": '\U000002DC', - "times;": '\U000000D7', - "timesb;": '\U000022A0', - "timesbar;": '\U00002A31', - "timesd;": '\U00002A30', - "tint;": '\U0000222D', - "toea;": '\U00002928', - "top;": '\U000022A4', - "topbot;": '\U00002336', - "topcir;": '\U00002AF1', - "topf;": '\U0001D565', - "topfork;": '\U00002ADA', - "tosa;": '\U00002929', - "tprime;": '\U00002034', - "trade;": '\U00002122', - 
"triangle;": '\U000025B5', - "triangledown;": '\U000025BF', - "triangleleft;": '\U000025C3', - "trianglelefteq;": '\U000022B4', - "triangleq;": '\U0000225C', - "triangleright;": '\U000025B9', - "trianglerighteq;": '\U000022B5', - "tridot;": '\U000025EC', - "trie;": '\U0000225C', - "triminus;": '\U00002A3A', - "triplus;": '\U00002A39', - "trisb;": '\U000029CD', - "tritime;": '\U00002A3B', - "trpezium;": '\U000023E2', - "tscr;": '\U0001D4C9', - "tscy;": '\U00000446', - "tshcy;": '\U0000045B', - "tstrok;": '\U00000167', - "twixt;": '\U0000226C', - "twoheadleftarrow;": '\U0000219E', - "twoheadrightarrow;": '\U000021A0', - "uArr;": '\U000021D1', - "uHar;": '\U00002963', - "uacute;": '\U000000FA', - "uarr;": '\U00002191', - "ubrcy;": '\U0000045E', - "ubreve;": '\U0000016D', - "ucirc;": '\U000000FB', - "ucy;": '\U00000443', - "udarr;": '\U000021C5', - "udblac;": '\U00000171', - "udhar;": '\U0000296E', - "ufisht;": '\U0000297E', - "ufr;": '\U0001D532', - "ugrave;": '\U000000F9', - "uharl;": '\U000021BF', - "uharr;": '\U000021BE', - "uhblk;": '\U00002580', - "ulcorn;": '\U0000231C', - "ulcorner;": '\U0000231C', - "ulcrop;": '\U0000230F', - "ultri;": '\U000025F8', - "umacr;": '\U0000016B', - "uml;": '\U000000A8', - "uogon;": '\U00000173', - "uopf;": '\U0001D566', - "uparrow;": '\U00002191', - "updownarrow;": '\U00002195', - "upharpoonleft;": '\U000021BF', - "upharpoonright;": '\U000021BE', - "uplus;": '\U0000228E', - "upsi;": '\U000003C5', - "upsih;": '\U000003D2', - "upsilon;": '\U000003C5', - "upuparrows;": '\U000021C8', - "urcorn;": '\U0000231D', - "urcorner;": '\U0000231D', - "urcrop;": '\U0000230E', - "uring;": '\U0000016F', - "urtri;": '\U000025F9', - "uscr;": '\U0001D4CA', - "utdot;": '\U000022F0', - "utilde;": '\U00000169', - "utri;": '\U000025B5', - "utrif;": '\U000025B4', - "uuarr;": '\U000021C8', - "uuml;": '\U000000FC', - "uwangle;": '\U000029A7', - "vArr;": '\U000021D5', - "vBar;": '\U00002AE8', - "vBarv;": '\U00002AE9', - "vDash;": '\U000022A8', - "vangrt;": 
'\U0000299C', - "varepsilon;": '\U000003F5', - "varkappa;": '\U000003F0', - "varnothing;": '\U00002205', - "varphi;": '\U000003D5', - "varpi;": '\U000003D6', - "varpropto;": '\U0000221D', - "varr;": '\U00002195', - "varrho;": '\U000003F1', - "varsigma;": '\U000003C2', - "vartheta;": '\U000003D1', - "vartriangleleft;": '\U000022B2', - "vartriangleright;": '\U000022B3', - "vcy;": '\U00000432', - "vdash;": '\U000022A2', - "vee;": '\U00002228', - "veebar;": '\U000022BB', - "veeeq;": '\U0000225A', - "vellip;": '\U000022EE', - "verbar;": '\U0000007C', - "vert;": '\U0000007C', - "vfr;": '\U0001D533', - "vltri;": '\U000022B2', - "vopf;": '\U0001D567', - "vprop;": '\U0000221D', - "vrtri;": '\U000022B3', - "vscr;": '\U0001D4CB', - "vzigzag;": '\U0000299A', - "wcirc;": '\U00000175', - "wedbar;": '\U00002A5F', - "wedge;": '\U00002227', - "wedgeq;": '\U00002259', - "weierp;": '\U00002118', - "wfr;": '\U0001D534', - "wopf;": '\U0001D568', - "wp;": '\U00002118', - "wr;": '\U00002240', - "wreath;": '\U00002240', - "wscr;": '\U0001D4CC', - "xcap;": '\U000022C2', - "xcirc;": '\U000025EF', - "xcup;": '\U000022C3', - "xdtri;": '\U000025BD', - "xfr;": '\U0001D535', - "xhArr;": '\U000027FA', - "xharr;": '\U000027F7', - "xi;": '\U000003BE', - "xlArr;": '\U000027F8', - "xlarr;": '\U000027F5', - "xmap;": '\U000027FC', - "xnis;": '\U000022FB', - "xodot;": '\U00002A00', - "xopf;": '\U0001D569', - "xoplus;": '\U00002A01', - "xotime;": '\U00002A02', - "xrArr;": '\U000027F9', - "xrarr;": '\U000027F6', - "xscr;": '\U0001D4CD', - "xsqcup;": '\U00002A06', - "xuplus;": '\U00002A04', - "xutri;": '\U000025B3', - "xvee;": '\U000022C1', - "xwedge;": '\U000022C0', - "yacute;": '\U000000FD', - "yacy;": '\U0000044F', - "ycirc;": '\U00000177', - "ycy;": '\U0000044B', - "yen;": '\U000000A5', - "yfr;": '\U0001D536', - "yicy;": '\U00000457', - "yopf;": '\U0001D56A', - "yscr;": '\U0001D4CE', - "yucy;": '\U0000044E', - "yuml;": '\U000000FF', - "zacute;": '\U0000017A', - "zcaron;": '\U0000017E', - "zcy;": 
'\U00000437', - "zdot;": '\U0000017C', - "zeetrf;": '\U00002128', - "zeta;": '\U000003B6', - "zfr;": '\U0001D537', - "zhcy;": '\U00000436', - "zigrarr;": '\U000021DD', - "zopf;": '\U0001D56B', - "zscr;": '\U0001D4CF', - "zwj;": '\U0000200D', - "zwnj;": '\U0000200C', - "AElig": '\U000000C6', - "AMP": '\U00000026', - "Aacute": '\U000000C1', - "Acirc": '\U000000C2', - "Agrave": '\U000000C0', - "Aring": '\U000000C5', - "Atilde": '\U000000C3', - "Auml": '\U000000C4', - "COPY": '\U000000A9', - "Ccedil": '\U000000C7', - "ETH": '\U000000D0', - "Eacute": '\U000000C9', - "Ecirc": '\U000000CA', - "Egrave": '\U000000C8', - "Euml": '\U000000CB', - "GT": '\U0000003E', - "Iacute": '\U000000CD', - "Icirc": '\U000000CE', - "Igrave": '\U000000CC', - "Iuml": '\U000000CF', - "LT": '\U0000003C', - "Ntilde": '\U000000D1', - "Oacute": '\U000000D3', - "Ocirc": '\U000000D4', - "Ograve": '\U000000D2', - "Oslash": '\U000000D8', - "Otilde": '\U000000D5', - "Ouml": '\U000000D6', - "QUOT": '\U00000022', - "REG": '\U000000AE', - "THORN": '\U000000DE', - "Uacute": '\U000000DA', - "Ucirc": '\U000000DB', - "Ugrave": '\U000000D9', - "Uuml": '\U000000DC', - "Yacute": '\U000000DD', - "aacute": '\U000000E1', - "acirc": '\U000000E2', - "acute": '\U000000B4', - "aelig": '\U000000E6', - "agrave": '\U000000E0', - "amp": '\U00000026', - "aring": '\U000000E5', - "atilde": '\U000000E3', - "auml": '\U000000E4', - "brvbar": '\U000000A6', - "ccedil": '\U000000E7', - "cedil": '\U000000B8', - "cent": '\U000000A2', - "copy": '\U000000A9', - "curren": '\U000000A4', - "deg": '\U000000B0', - "divide": '\U000000F7', - "eacute": '\U000000E9', - "ecirc": '\U000000EA', - "egrave": '\U000000E8', - "eth": '\U000000F0', - "euml": '\U000000EB', - "frac12": '\U000000BD', - "frac14": '\U000000BC', - "frac34": '\U000000BE', - "gt": '\U0000003E', - "iacute": '\U000000ED', - "icirc": '\U000000EE', - "iexcl": '\U000000A1', - "igrave": '\U000000EC', - "iquest": '\U000000BF', - "iuml": '\U000000EF', - "laquo": '\U000000AB', - "lt": 
'\U0000003C', - "macr": '\U000000AF', - "micro": '\U000000B5', - "middot": '\U000000B7', - "nbsp": '\U000000A0', - "not": '\U000000AC', - "ntilde": '\U000000F1', - "oacute": '\U000000F3', - "ocirc": '\U000000F4', - "ograve": '\U000000F2', - "ordf": '\U000000AA', - "ordm": '\U000000BA', - "oslash": '\U000000F8', - "otilde": '\U000000F5', - "ouml": '\U000000F6', - "para": '\U000000B6', - "plusmn": '\U000000B1', - "pound": '\U000000A3', - "quot": '\U00000022', - "raquo": '\U000000BB', - "reg": '\U000000AE', - "sect": '\U000000A7', - "shy": '\U000000AD', - "sup1": '\U000000B9', - "sup2": '\U000000B2', - "sup3": '\U000000B3', - "szlig": '\U000000DF', - "thorn": '\U000000FE', - "times": '\U000000D7', - "uacute": '\U000000FA', - "ucirc": '\U000000FB', - "ugrave": '\U000000F9', - "uml": '\U000000A8', - "uuml": '\U000000FC', - "yacute": '\U000000FD', - "yen": '\U000000A5', - "yuml": '\U000000FF', -} - -// HTML entities that are two unicode codepoints. -var entity2 = map[string][2]rune{ - // TODO(nigeltao): Handle replacements that are wider than their names. 
- // "nLt;": {'\u226A', '\u20D2'}, - // "nGt;": {'\u226B', '\u20D2'}, - "NotEqualTilde;": {'\u2242', '\u0338'}, - "NotGreaterFullEqual;": {'\u2267', '\u0338'}, - "NotGreaterGreater;": {'\u226B', '\u0338'}, - "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, - "NotHumpDownHump;": {'\u224E', '\u0338'}, - "NotHumpEqual;": {'\u224F', '\u0338'}, - "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, - "NotLessLess;": {'\u226A', '\u0338'}, - "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, - "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, - "NotNestedLessLess;": {'\u2AA1', '\u0338'}, - "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, - "NotRightTriangleBar;": {'\u29D0', '\u0338'}, - "NotSquareSubset;": {'\u228F', '\u0338'}, - "NotSquareSuperset;": {'\u2290', '\u0338'}, - "NotSubset;": {'\u2282', '\u20D2'}, - "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, - "NotSucceedsTilde;": {'\u227F', '\u0338'}, - "NotSuperset;": {'\u2283', '\u20D2'}, - "ThickSpace;": {'\u205F', '\u200A'}, - "acE;": {'\u223E', '\u0333'}, - "bne;": {'\u003D', '\u20E5'}, - "bnequiv;": {'\u2261', '\u20E5'}, - "caps;": {'\u2229', '\uFE00'}, - "cups;": {'\u222A', '\uFE00'}, - "fjlig;": {'\u0066', '\u006A'}, - "gesl;": {'\u22DB', '\uFE00'}, - "gvertneqq;": {'\u2269', '\uFE00'}, - "gvnE;": {'\u2269', '\uFE00'}, - "lates;": {'\u2AAD', '\uFE00'}, - "lesg;": {'\u22DA', '\uFE00'}, - "lvertneqq;": {'\u2268', '\uFE00'}, - "lvnE;": {'\u2268', '\uFE00'}, - "nGg;": {'\u22D9', '\u0338'}, - "nGtv;": {'\u226B', '\u0338'}, - "nLl;": {'\u22D8', '\u0338'}, - "nLtv;": {'\u226A', '\u0338'}, - "nang;": {'\u2220', '\u20D2'}, - "napE;": {'\u2A70', '\u0338'}, - "napid;": {'\u224B', '\u0338'}, - "nbump;": {'\u224E', '\u0338'}, - "nbumpe;": {'\u224F', '\u0338'}, - "ncongdot;": {'\u2A6D', '\u0338'}, - "nedot;": {'\u2250', '\u0338'}, - "nesim;": {'\u2242', '\u0338'}, - "ngE;": {'\u2267', '\u0338'}, - "ngeqq;": {'\u2267', '\u0338'}, - "ngeqslant;": {'\u2A7E', '\u0338'}, - "nges;": {'\u2A7E', '\u0338'}, - "nlE;": {'\u2266', '\u0338'}, - "nleqq;": {'\u2266', 
'\u0338'}, - "nleqslant;": {'\u2A7D', '\u0338'}, - "nles;": {'\u2A7D', '\u0338'}, - "notinE;": {'\u22F9', '\u0338'}, - "notindot;": {'\u22F5', '\u0338'}, - "nparsl;": {'\u2AFD', '\u20E5'}, - "npart;": {'\u2202', '\u0338'}, - "npre;": {'\u2AAF', '\u0338'}, - "npreceq;": {'\u2AAF', '\u0338'}, - "nrarrc;": {'\u2933', '\u0338'}, - "nrarrw;": {'\u219D', '\u0338'}, - "nsce;": {'\u2AB0', '\u0338'}, - "nsubE;": {'\u2AC5', '\u0338'}, - "nsubset;": {'\u2282', '\u20D2'}, - "nsubseteqq;": {'\u2AC5', '\u0338'}, - "nsucceq;": {'\u2AB0', '\u0338'}, - "nsupE;": {'\u2AC6', '\u0338'}, - "nsupset;": {'\u2283', '\u20D2'}, - "nsupseteqq;": {'\u2AC6', '\u0338'}, - "nvap;": {'\u224D', '\u20D2'}, - "nvge;": {'\u2265', '\u20D2'}, - "nvgt;": {'\u003E', '\u20D2'}, - "nvle;": {'\u2264', '\u20D2'}, - "nvlt;": {'\u003C', '\u20D2'}, - "nvltrie;": {'\u22B4', '\u20D2'}, - "nvrtrie;": {'\u22B5', '\u20D2'}, - "nvsim;": {'\u223C', '\u20D2'}, - "race;": {'\u223D', '\u0331'}, - "smtes;": {'\u2AAC', '\uFE00'}, - "sqcaps;": {'\u2293', '\uFE00'}, - "sqcups;": {'\u2294', '\uFE00'}, - "varsubsetneq;": {'\u228A', '\uFE00'}, - "varsubsetneqq;": {'\u2ACB', '\uFE00'}, - "varsupsetneq;": {'\u228B', '\uFE00'}, - "varsupsetneqq;": {'\u2ACC', '\uFE00'}, - "vnsub;": {'\u2282', '\u20D2'}, - "vnsup;": {'\u2283', '\u20D2'}, - "vsubnE;": {'\u2ACB', '\uFE00'}, - "vsubne;": {'\u228A', '\uFE00'}, - "vsupnE;": {'\u2ACC', '\uFE00'}, - "vsupne;": {'\u228B', '\uFE00'}, -} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go deleted file mode 100644 index 04c6bec2..00000000 --- a/vendor/golang.org/x/net/html/escape.go +++ /dev/null @@ -1,339 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "bytes" - "strings" - "unicode/utf8" -) - -// These replacements permit compatibility with old numeric entities that -// assumed Windows-1252 encoding. 
-// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference -var replacementTable = [...]rune{ - '\u20AC', // First entry is what 0x80 should be replaced with. - '\u0081', - '\u201A', - '\u0192', - '\u201E', - '\u2026', - '\u2020', - '\u2021', - '\u02C6', - '\u2030', - '\u0160', - '\u2039', - '\u0152', - '\u008D', - '\u017D', - '\u008F', - '\u0090', - '\u2018', - '\u2019', - '\u201C', - '\u201D', - '\u2022', - '\u2013', - '\u2014', - '\u02DC', - '\u2122', - '\u0161', - '\u203A', - '\u0153', - '\u009D', - '\u017E', - '\u0178', // Last entry is 0x9F. - // 0x00->'\uFFFD' is handled programmatically. - // 0x0D->'\u000D' is a no-op. -} - -// unescapeEntity reads an entity like "<" from b[src:] and writes the -// corresponding "<" to b[dst:], returning the incremented dst and src cursors. -// Precondition: b[src] == '&' && dst <= src. -// attribute should be true if parsing an attribute value. -func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { - // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference - - // i starts at 1 because we already know that s[0] == '&'. - i, s := 1, b[src:] - - if len(s) <= 1 { - b[dst] = b[src] - return dst + 1, src + 1 - } - - if s[i] == '#' { - if len(s) <= 3 { // We need to have at least "&#.". - b[dst] = b[src] - return dst + 1, src + 1 - } - i++ - c := s[i] - hex := false - if c == 'x' || c == 'X' { - hex = true - i++ - } - - x := '\x00' - for i < len(s) { - c = s[i] - i++ - if hex { - if '0' <= c && c <= '9' { - x = 16*x + rune(c) - '0' - continue - } else if 'a' <= c && c <= 'f' { - x = 16*x + rune(c) - 'a' + 10 - continue - } else if 'A' <= c && c <= 'F' { - x = 16*x + rune(c) - 'A' + 10 - continue - } - } else if '0' <= c && c <= '9' { - x = 10*x + rune(c) - '0' - continue - } - if c != ';' { - i-- - } - break - } - - if i <= 3 { // No characters matched. 
- b[dst] = b[src] - return dst + 1, src + 1 - } - - if 0x80 <= x && x <= 0x9F { - // Replace characters from Windows-1252 with UTF-8 equivalents. - x = replacementTable[x-0x80] - } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { - // Replace invalid characters with the replacement character. - x = '\uFFFD' - } - - return dst + utf8.EncodeRune(b[dst:], x), src + i - } - - // Consume the maximum number of characters possible, with the - // consumed characters matching one of the named references. - - for i < len(s) { - c := s[i] - i++ - // Lower-cased characters are more common in entities, so we check for them first. - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { - continue - } - if c != ';' { - i-- - } - break - } - - entityName := string(s[1:i]) - if entityName == "" { - // No-op. - } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { - // No-op. - } else if x := entity[entityName]; x != 0 { - return dst + utf8.EncodeRune(b[dst:], x), src + i - } else if x := entity2[entityName]; x[0] != 0 { - dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) - return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i - } else if !attribute { - maxLen := len(entityName) - 1 - if maxLen > longestEntityWithoutSemicolon { - maxLen = longestEntityWithoutSemicolon - } - for j := maxLen; j > 1; j-- { - if x := entity[entityName[:j]]; x != 0 { - return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 - } - } - } - - dst1, src1 = dst+i, src+i - copy(b[dst:dst1], b[src:src1]) - return dst1, src1 -} - -// unescape unescapes b's entities in-place, so that "a<b" becomes "a' byte that, per above, we'd like to avoid escaping unless we have to. -// -// Studying the summary table (and T actions in its '>' column) closely, we -// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the -// start of the comment data. State 52 is after a '!'. The other three states -// are after a '-'. 
-// -// Our algorithm is thus to escape every '&' and to escape '>' if and only if: -// - The '>' is after a '!' or '-' (in the unescaped data) or -// - The '>' is at the start of the comment data (after the opening ""); err != nil { - return err - } - return nil - case DoctypeNode: - if _, err := w.WriteString("') - case RawNode: - _, err := w.WriteString(n.Data) - return err - default: - return errors.New("html: unknown node type") - } - - // Render the opening tag. - if err := w.WriteByte('<'); err != nil { - return err - } - if _, err := w.WriteString(n.Data); err != nil { - return err - } - for _, a := range n.Attr { - if err := w.WriteByte(' '); err != nil { - return err - } - if a.Namespace != "" { - if _, err := w.WriteString(a.Namespace); err != nil { - return err - } - if err := w.WriteByte(':'); err != nil { - return err - } - } - if _, err := w.WriteString(a.Key); err != nil { - return err - } - if _, err := w.WriteString(`="`); err != nil { - return err - } - if err := escape(w, a.Val); err != nil { - return err - } - if err := w.WriteByte('"'); err != nil { - return err - } - } - if voidElements[n.Data] { - if n.FirstChild != nil { - return fmt.Errorf("html: void element <%s> has child nodes", n.Data) - } - _, err := w.WriteString("/>") - return err - } - if err := w.WriteByte('>'); err != nil { - return err - } - - // Add initial newline where there is danger of a newline beging ignored. - if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") { - switch n.Data { - case "pre", "listing", "textarea": - if err := w.WriteByte('\n'); err != nil { - return err - } - } - } - - // Render any child nodes - if childTextNodesAreLiteral(n) { - for c := n.FirstChild; c != nil; c = c.NextSibling { - if c.Type == TextNode { - if _, err := w.WriteString(c.Data); err != nil { - return err - } - } else { - if err := render1(w, c); err != nil { - return err - } - } - } - if n.Data == "plaintext" { - // Don't render anything else. 
must be the - // last element in the file, with no closing tag. - return plaintextAbort - } - } else { - for c := n.FirstChild; c != nil; c = c.NextSibling { - if err := render1(w, c); err != nil { - return err - } - } - } - - // Render the </xxx> closing tag. - if _, err := w.WriteString("</"); err != nil { - return err - } - if _, err := w.WriteString(n.Data); err != nil { - return err - } - return w.WriteByte('>') -} - -func childTextNodesAreLiteral(n *Node) bool { - // Per WHATWG HTML 13.3, if the parent of the current node is a style, - // script, xmp, iframe, noembed, noframes, or plaintext element, and the - // current node is a text node, append the value of the node's data - // literally. The specification is not explicit about it, but we only - // enforce this if we are in the HTML namespace (i.e. when the namespace is - // ""). - // NOTE: we also always include noscript elements, although the - // specification states that they should only be rendered as such if - // scripting is enabled for the node (which is not something we track). - if n.Namespace != "" { - return false - } - switch n.Data { - case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": - return true - default: - return false - } -} - -// writeQuoted writes s to w surrounded by quotes. Normally it will use double -// quotes, but if s contains a double quote, it will use single quotes. -// It is used for writing the identifiers in a doctype declaration. -// In valid HTML, they can't contain both types of quotes. -func writeQuoted(w writer, s string) error { - var q byte = '"' - if strings.Contains(s, `"`) { - q = '\'' - } - if err := w.WriteByte(q); err != nil { - return err - } - if _, err := w.WriteString(s); err != nil { - return err - } - if err := w.WriteByte(q); err != nil { - return err - } - return nil -} - -// Section 12.1.2, "Elements", gives this list of void elements. Void elements -// are those that can't have any contents. 
-var voidElements = map[string]bool{ - "area": true, - "base": true, - "br": true, - "col": true, - "embed": true, - "hr": true, - "img": true, - "input": true, - "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility. - "link": true, - "meta": true, - "param": true, - "source": true, - "track": true, - "wbr": true, -} diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go deleted file mode 100644 index de67f938..00000000 --- a/vendor/golang.org/x/net/html/token.go +++ /dev/null @@ -1,1268 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "bytes" - "errors" - "io" - "strconv" - "strings" - - "golang.org/x/net/html/atom" -) - -// A TokenType is the type of a Token. -type TokenType uint32 - -const ( - // ErrorToken means that an error occurred during tokenization. - ErrorToken TokenType = iota - // TextToken means a text node. - TextToken - // A StartTagToken looks like <a>. - StartTagToken - // An EndTagToken looks like </a>. - EndTagToken - // A SelfClosingTagToken tag looks like <br/>. - SelfClosingTagToken - // A CommentToken looks like <!--x-->. - CommentToken - // A DoctypeToken looks like <!DOCTYPE x> - DoctypeToken -) - -// ErrBufferExceeded means that the buffering limit was exceeded. -var ErrBufferExceeded = errors.New("max buffer exceeded") - -// String returns a string representation of the TokenType. 
-func (t TokenType) String() string { - switch t { - case ErrorToken: - return "Error" - case TextToken: - return "Text" - case StartTagToken: - return "StartTag" - case EndTagToken: - return "EndTag" - case SelfClosingTagToken: - return "SelfClosingTag" - case CommentToken: - return "Comment" - case DoctypeToken: - return "Doctype" - } - return "Invalid(" + strconv.Itoa(int(t)) + ")" -} - -// An Attribute is an attribute namespace-key-value triple. Namespace is -// non-empty for foreign attributes like xlink, Key is alphabetic (and hence -// does not contain escapable characters like '&', '<' or '>'), and Val is -// unescaped (it looks like "a<b" rather than "a&lt;b"). -// -// Namespace is only used by the parser, not the tokenizer. -type Attribute struct { - Namespace, Key, Val string -} - -// A Token consists of a TokenType and some Data (tag name for start and end -// tags, content for text, comments and doctypes). A tag Token may also contain -// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b" -// rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for Data, or -// zero if Data is not a known tag name. -type Token struct { - Type TokenType - DataAtom atom.Atom - Data string - Attr []Attribute -} - -// tagString returns a string representation of a tag Token's Data and Attr. -func (t Token) tagString() string { - if len(t.Attr) == 0 { - return t.Data - } - buf := bytes.NewBufferString(t.Data) - for _, a := range t.Attr { - buf.WriteByte(' ') - buf.WriteString(a.Key) - buf.WriteString(`="`) - escape(buf, a.Val) - buf.WriteByte('"') - } - return buf.String() -} - -// String returns a string representation of the Token. 
-func (t Token) String() string { - switch t.Type { - case ErrorToken: - return "" - case TextToken: - return EscapeString(t.Data) - case StartTagToken: - return "<" + t.tagString() + ">" - case EndTagToken: - return "</" + t.tagString() + ">" - case SelfClosingTagToken: - return "<" + t.tagString() + "/>" - case CommentToken: - return "<!--" + escapeCommentString(t.Data) + "-->" - case DoctypeToken: - return "<!DOCTYPE " + EscapeString(t.Data) + ">" - } - return "Invalid(" + strconv.Itoa(int(t.Type)) + ")" -} - -// span is a range of bytes in a Tokenizer's buffer. The start is inclusive, -// the end is exclusive. -type span struct { - start, end int -} - -// A Tokenizer returns a stream of HTML Tokens. -type Tokenizer struct { - // r is the source of the HTML text. - r io.Reader - // tt is the TokenType of the current token. - tt TokenType - // err is the first error encountered during tokenization. It is possible - // for tt != Error && err != nil to hold: this means that Next returned a - // valid token but the subsequent Next call will return an error token. - // For example, if the HTML text input was just "plain", then the first - // Next call would set z.err to io.EOF but return a TextToken, and all - // subsequent Next calls would return an ErrorToken. - // err is never reset. Once it becomes non-nil, it stays non-nil. - err error - // readErr is the error returned by the io.Reader r. It is separate from - // err because it is valid for an io.Reader to return (n int, err1 error) - // such that n > 0 && err1 != nil, and callers should always process the - // n > 0 bytes before considering the error err1. - readErr error - // buf[raw.start:raw.end] holds the raw bytes of the current token. - // buf[raw.end:] is buffered input that will yield future tokens. - raw span - buf []byte - // maxBuf limits the data buffered in buf. A value of 0 means unlimited. 
- maxBuf int - // buf[data.start:data.end] holds the raw bytes of the current token's data: - // a text token's text, a tag token's tag name, etc. - data span - // pendingAttr is the attribute key and value currently being tokenized. - // When complete, pendingAttr is pushed onto attr. nAttrReturned is - // incremented on each call to TagAttr. - pendingAttr [2]span - attr [][2]span - nAttrReturned int - // rawTag is the "script" in "</script>" that closes the next token. If - // non-empty, the subsequent call to Next will return a raw or RCDATA text - // token: one that treats "<p>" as text instead of an element. - // rawTag's contents are lower-cased. - rawTag string - // textIsRaw is whether the current text token's data is not escaped. - textIsRaw bool - // convertNUL is whether NUL bytes in the current token's data should - // be converted into \ufffd replacement characters. - convertNUL bool - // allowCDATA is whether CDATA sections are allowed in the current context. - allowCDATA bool -} - -// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as -// the text "foo". The default value is false, which means to recognize it as -// a bogus comment "<!-- [CDATA[foo]] -->" instead. -// -// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and -// only if tokenizing foreign content, such as MathML and SVG. However, -// tracking foreign-contentness is difficult to do purely in the tokenizer, -// as opposed to the parser, due to HTML integration points: an <svg> element -// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to- -// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the -// responsibility of the user of a tokenizer to call AllowCDATA as appropriate. -// In practice, if using the tokenizer without caring whether MathML or SVG -// CDATA is text or comments, such as tokenizing HTML to find all the anchor -// text, it is acceptable to ignore this responsibility. 
-func (z *Tokenizer) AllowCDATA(allowCDATA bool) { - z.allowCDATA = allowCDATA -} - -// NextIsNotRawText instructs the tokenizer that the next token should not be -// considered as 'raw text'. Some elements, such as script and title elements, -// normally require the next token after the opening tag to be 'raw text' that -// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>" -// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and -// an end tag token for "</title>". There are no distinct start tag or end tag -// tokens for the "<b>" and "</b>". -// -// This tokenizer implementation will generally look for raw text at the right -// times. Strictly speaking, an HTML5 compliant tokenizer should not look for -// raw text if in foreign content: <title> generally needs raw text, but a -// <title> inside an <svg> does not. Another example is that a <textarea> -// generally needs raw text, but a <textarea> is not allowed as an immediate -// child of a <select>; in normal parsing, a <textarea> implies </select>, but -// one cannot close the implicit element when parsing a <select>'s InnerHTML. -// Similarly to AllowCDATA, tracking the correct moment to override raw-text- -// ness is difficult to do purely in the tokenizer, as opposed to the parser. -// For strict compliance with the HTML5 tokenization algorithm, it is the -// responsibility of the user of a tokenizer to call NextIsNotRawText as -// appropriate. In practice, like AllowCDATA, it is acceptable to ignore this -// responsibility for basic usage. -// -// Note that this 'raw text' concept is different from the one offered by the -// Tokenizer.Raw method. -func (z *Tokenizer) NextIsNotRawText() { - z.rawTag = "" -} - -// Err returns the error associated with the most recent ErrorToken token. -// This is typically io.EOF, meaning the end of tokenization. 
-func (z *Tokenizer) Err() error { - if z.tt != ErrorToken { - return nil - } - return z.err -} - -// readByte returns the next byte from the input stream, doing a buffered read -// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte -// slice that holds all the bytes read so far for the current token. -// It sets z.err if the underlying reader returns an error. -// Pre-condition: z.err == nil. -func (z *Tokenizer) readByte() byte { - if z.raw.end >= len(z.buf) { - // Our buffer is exhausted and we have to read from z.r. Check if the - // previous read resulted in an error. - if z.readErr != nil { - z.err = z.readErr - return 0 - } - // We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length - // z.raw.end - z.raw.start is more than half the capacity of z.buf, then we - // allocate a new buffer before the copy. - c := cap(z.buf) - d := z.raw.end - z.raw.start - var buf1 []byte - if 2*d > c { - buf1 = make([]byte, d, 2*c) - } else { - buf1 = z.buf[:d] - } - copy(buf1, z.buf[z.raw.start:z.raw.end]) - if x := z.raw.start; x != 0 { - // Adjust the data/attr spans to refer to the same contents after the copy. - z.data.start -= x - z.data.end -= x - z.pendingAttr[0].start -= x - z.pendingAttr[0].end -= x - z.pendingAttr[1].start -= x - z.pendingAttr[1].end -= x - for i := range z.attr { - z.attr[i][0].start -= x - z.attr[i][0].end -= x - z.attr[i][1].start -= x - z.attr[i][1].end -= x - } - } - z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d] - // Now that we have copied the live bytes to the start of the buffer, - // we read from z.r into the remainder. 
- var n int - n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)]) - if n == 0 { - z.err = z.readErr - return 0 - } - z.buf = buf1[:d+n] - } - x := z.buf[z.raw.end] - z.raw.end++ - if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf { - z.err = ErrBufferExceeded - return 0 - } - return x -} - -// Buffered returns a slice containing data buffered but not yet tokenized. -func (z *Tokenizer) Buffered() []byte { - return z.buf[z.raw.end:] -} - -// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil). -// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil) -// too many times in succession. -func readAtLeastOneByte(r io.Reader, b []byte) (int, error) { - for i := 0; i < 100; i++ { - if n, err := r.Read(b); n != 0 || err != nil { - return n, err - } - } - return 0, io.ErrNoProgress -} - -// skipWhiteSpace skips past any white space. -func (z *Tokenizer) skipWhiteSpace() { - if z.err != nil { - return - } - for { - c := z.readByte() - if z.err != nil { - return - } - switch c { - case ' ', '\n', '\r', '\t', '\f': - // No-op. - default: - z.raw.end-- - return - } - } -} - -// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and -// is typically something like "script" or "textarea". -func (z *Tokenizer) readRawOrRCDATA() { - if z.rawTag == "script" { - z.readScript() - z.textIsRaw = true - z.rawTag = "" - return - } -loop: - for { - c := z.readByte() - if z.err != nil { - break loop - } - if c != '<' { - continue loop - } - c = z.readByte() - if z.err != nil { - break loop - } - if c != '/' { - z.raw.end-- - continue loop - } - if z.readRawEndTag() || z.err != nil { - break loop - } - } - z.data.end = z.raw.end - // A textarea's or title's RCDATA can contain escaped entities. - z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title" - z.rawTag = "" -} - -// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag. 
-// If it succeeds, it backs up the input position to reconsume the tag and -// returns true. Otherwise it returns false. The opening "</" has already been -// consumed. -func (z *Tokenizer) readRawEndTag() bool { - for i := 0; i < len(z.rawTag); i++ { - c := z.readByte() - if z.err != nil { - return false - } - if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') { - z.raw.end-- - return false - } - } - c := z.readByte() - if z.err != nil { - return false - } - switch c { - case ' ', '\n', '\r', '\t', '\f', '/', '>': - // The 3 is 2 for the leading "</" plus 1 for the trailing character c. - z.raw.end -= 3 + len(z.rawTag) - return true - } - z.raw.end-- - return false -} - -// readScript reads until the next </script> tag, following the byzantine -// rules for escaping/hiding the closing tag. -func (z *Tokenizer) readScript() { - defer func() { - z.data.end = z.raw.end - }() - var c byte - -scriptData: - c = z.readByte() - if z.err != nil { - return - } - if c == '<' { - goto scriptDataLessThanSign - } - goto scriptData - -scriptDataLessThanSign: - c = z.readByte() - if z.err != nil { - return - } - switch c { - case '/': - goto scriptDataEndTagOpen - case '!': - goto scriptDataEscapeStart - } - z.raw.end-- - goto scriptData - -scriptDataEndTagOpen: - if z.readRawEndTag() || z.err != nil { - return - } - goto scriptData - -scriptDataEscapeStart: - c = z.readByte() - if z.err != nil { - return - } - if c == '-' { - goto scriptDataEscapeStartDash - } - z.raw.end-- - goto scriptData - -scriptDataEscapeStartDash: - c = z.readByte() - if z.err != nil { - return - } - if c == '-' { - goto scriptDataEscapedDashDash - } - z.raw.end-- - goto scriptData - -scriptDataEscaped: - c = z.readByte() - if z.err != nil { - return - } - switch c { - case '-': - goto scriptDataEscapedDash - case '<': - goto scriptDataEscapedLessThanSign - } - goto scriptDataEscaped - -scriptDataEscapedDash: - c = z.readByte() - if z.err != nil { - return - } - switch c { - case '-': - goto 
scriptDataEscapedDashDash - case '<': - goto scriptDataEscapedLessThanSign - } - goto scriptDataEscaped - -scriptDataEscapedDashDash: - c = z.readByte() - if z.err != nil { - return - } - switch c { - case '-': - goto scriptDataEscapedDashDash - case '<': - goto scriptDataEscapedLessThanSign - case '>': - goto scriptData - } - goto scriptDataEscaped - -scriptDataEscapedLessThanSign: - c = z.readByte() - if z.err != nil { - return - } - if c == '/' { - goto scriptDataEscapedEndTagOpen - } - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { - goto scriptDataDoubleEscapeStart - } - z.raw.end-- - goto scriptData - -scriptDataEscapedEndTagOpen: - if z.readRawEndTag() || z.err != nil { - return - } - goto scriptDataEscaped - -scriptDataDoubleEscapeStart: - z.raw.end-- - for i := 0; i < len("script"); i++ { - c = z.readByte() - if z.err != nil { - return - } - if c != "script"[i] && c != "SCRIPT"[i] { - z.raw.end-- - goto scriptDataEscaped - } - } - c = z.readByte() - if z.err != nil { - return - } - switch c { - case ' ', '\n', '\r', '\t', '\f', '/', '>': - goto scriptDataDoubleEscaped - } - z.raw.end-- - goto scriptDataEscaped - -scriptDataDoubleEscaped: - c = z.readByte() - if z.err != nil { - return - } - switch c { - case '-': - goto scriptDataDoubleEscapedDash - case '<': - goto scriptDataDoubleEscapedLessThanSign - } - goto scriptDataDoubleEscaped - -scriptDataDoubleEscapedDash: - c = z.readByte() - if z.err != nil { - return - } - switch c { - case '-': - goto scriptDataDoubleEscapedDashDash - case '<': - goto scriptDataDoubleEscapedLessThanSign - } - goto scriptDataDoubleEscaped - -scriptDataDoubleEscapedDashDash: - c = z.readByte() - if z.err != nil { - return - } - switch c { - case '-': - goto scriptDataDoubleEscapedDashDash - case '<': - goto scriptDataDoubleEscapedLessThanSign - case '>': - goto scriptData - } - goto scriptDataDoubleEscaped - -scriptDataDoubleEscapedLessThanSign: - c = z.readByte() - if z.err != nil { - return - } - if c == '/' { - goto 
scriptDataDoubleEscapeEnd - } - z.raw.end-- - goto scriptDataDoubleEscaped - -scriptDataDoubleEscapeEnd: - if z.readRawEndTag() { - z.raw.end += len("</script>") - goto scriptDataEscaped - } - if z.err != nil { - return - } - goto scriptDataDoubleEscaped -} - -// readComment reads the next comment token starting with "<!--". The opening -// "<!--" has already been consumed. -func (z *Tokenizer) readComment() { - // When modifying this function, consider manually increasing the - // maxSuffixLen constant in func TestComments, from 6 to e.g. 9 or more. - // That increase should only be temporary, not committed, as it - // exponentially affects the test running time. - - z.data.start = z.raw.end - defer func() { - if z.data.end < z.data.start { - // It's a comment with no data, like <!-->. - z.data.end = z.data.start - } - }() - - var dashCount int - beginning := true - for { - c := z.readByte() - if z.err != nil { - z.data.end = z.calculateAbruptCommentDataEnd() - return - } - switch c { - case '-': - dashCount++ - continue - case '>': - if dashCount >= 2 || beginning { - z.data.end = z.raw.end - len("-->") - return - } - case '!': - if dashCount >= 2 { - c = z.readByte() - if z.err != nil { - z.data.end = z.calculateAbruptCommentDataEnd() - return - } else if c == '>' { - z.data.end = z.raw.end - len("--!>") - return - } else if c == '-' { - dashCount = 1 - beginning = false - continue - } - } - } - dashCount = 0 - beginning = false - } -} - -func (z *Tokenizer) calculateAbruptCommentDataEnd() int { - raw := z.Raw() - const prefixLen = len("<!--") - if len(raw) >= prefixLen { - raw = raw[prefixLen:] - if hasSuffix(raw, "--!") { - return z.raw.end - 3 - } else if hasSuffix(raw, "--") { - return z.raw.end - 2 - } else if hasSuffix(raw, "-") { - return z.raw.end - 1 - } - } - return z.raw.end -} - -func hasSuffix(b []byte, suffix string) bool { - if len(b) < len(suffix) { - return false - } - b = b[len(b)-len(suffix):] - for i := range b { - if b[i] != suffix[i] { - 
return false - } - } - return true -} - -// readUntilCloseAngle reads until the next ">". -func (z *Tokenizer) readUntilCloseAngle() { - z.data.start = z.raw.end - for { - c := z.readByte() - if z.err != nil { - z.data.end = z.raw.end - return - } - if c == '>' { - z.data.end = z.raw.end - len(">") - return - } - } -} - -// readMarkupDeclaration reads the next token starting with "<!". It might be -// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or -// "<!a bogus comment". The opening "<!" has already been consumed. -func (z *Tokenizer) readMarkupDeclaration() TokenType { - z.data.start = z.raw.end - var c [2]byte - for i := 0; i < 2; i++ { - c[i] = z.readByte() - if z.err != nil { - z.data.end = z.raw.end - return CommentToken - } - } - if c[0] == '-' && c[1] == '-' { - z.readComment() - return CommentToken - } - z.raw.end -= 2 - if z.readDoctype() { - return DoctypeToken - } - if z.allowCDATA && z.readCDATA() { - z.convertNUL = true - return TextToken - } - // It's a bogus comment. - z.readUntilCloseAngle() - return CommentToken -} - -// readDoctype attempts to read a doctype declaration and returns true if -// successful. The opening "<!" has already been consumed. -func (z *Tokenizer) readDoctype() bool { - const s = "DOCTYPE" - for i := 0; i < len(s); i++ { - c := z.readByte() - if z.err != nil { - z.data.end = z.raw.end - return false - } - if c != s[i] && c != s[i]+('a'-'A') { - // Back up to read the fragment of "DOCTYPE" again. - z.raw.end = z.data.start - return false - } - } - if z.skipWhiteSpace(); z.err != nil { - z.data.start = z.raw.end - z.data.end = z.raw.end - return true - } - z.readUntilCloseAngle() - return true -} - -// readCDATA attempts to read a CDATA section and returns true if -// successful. The opening "<!" has already been consumed. 
-func (z *Tokenizer) readCDATA() bool { - const s = "[CDATA[" - for i := 0; i < len(s); i++ { - c := z.readByte() - if z.err != nil { - z.data.end = z.raw.end - return false - } - if c != s[i] { - // Back up to read the fragment of "[CDATA[" again. - z.raw.end = z.data.start - return false - } - } - z.data.start = z.raw.end - brackets := 0 - for { - c := z.readByte() - if z.err != nil { - z.data.end = z.raw.end - return true - } - switch c { - case ']': - brackets++ - case '>': - if brackets >= 2 { - z.data.end = z.raw.end - len("]]>") - return true - } - brackets = 0 - default: - brackets = 0 - } - } -} - -// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end] -// case-insensitively matches any element of ss. -func (z *Tokenizer) startTagIn(ss ...string) bool { -loop: - for _, s := range ss { - if z.data.end-z.data.start != len(s) { - continue loop - } - for i := 0; i < len(s); i++ { - c := z.buf[z.data.start+i] - if 'A' <= c && c <= 'Z' { - c += 'a' - 'A' - } - if c != s[i] { - continue loop - } - } - return true - } - return false -} - -// readStartTag reads the next start tag token. The opening "<a" has already -// been consumed, where 'a' means anything in [A-Za-z]. -func (z *Tokenizer) readStartTag() TokenType { - z.readTag(true) - if z.err != nil { - return ErrorToken - } - // Several tags flag the tokenizer's next token as raw. - c, raw := z.buf[z.data.start], false - if 'A' <= c && c <= 'Z' { - c += 'a' - 'A' - } - switch c { - case 'i': - raw = z.startTagIn("iframe") - case 'n': - raw = z.startTagIn("noembed", "noframes", "noscript") - case 'p': - raw = z.startTagIn("plaintext") - case 's': - raw = z.startTagIn("script", "style") - case 't': - raw = z.startTagIn("textarea", "title") - case 'x': - raw = z.startTagIn("xmp") - } - if raw { - z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end])) - } - // Look for a self-closing token like "<br/>". 
- if z.err == nil && z.buf[z.raw.end-2] == '/' { - return SelfClosingTagToken - } - return StartTagToken -} - -// readTag reads the next tag token and its attributes. If saveAttr, those -// attributes are saved in z.attr, otherwise z.attr is set to an empty slice. -// The opening "<a" or "</a" has already been consumed, where 'a' means anything -// in [A-Za-z]. -func (z *Tokenizer) readTag(saveAttr bool) { - z.attr = z.attr[:0] - z.nAttrReturned = 0 - // Read the tag name and attribute key/value pairs. - z.readTagName() - if z.skipWhiteSpace(); z.err != nil { - return - } - for { - c := z.readByte() - if z.err != nil || c == '>' { - break - } - z.raw.end-- - z.readTagAttrKey() - z.readTagAttrVal() - // Save pendingAttr if saveAttr and that attribute has a non-empty key. - if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end { - z.attr = append(z.attr, z.pendingAttr) - } - if z.skipWhiteSpace(); z.err != nil { - break - } - } -} - -// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end) -// is positioned such that the first byte of the tag name (the "d" in "<div") -// has already been consumed. -func (z *Tokenizer) readTagName() { - z.data.start = z.raw.end - 1 - for { - c := z.readByte() - if z.err != nil { - z.data.end = z.raw.end - return - } - switch c { - case ' ', '\n', '\r', '\t', '\f': - z.data.end = z.raw.end - 1 - return - case '/', '>': - z.raw.end-- - z.data.end = z.raw.end - return - } - } -} - -// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>". -// Precondition: z.err == nil. 
-func (z *Tokenizer) readTagAttrKey() { - z.pendingAttr[0].start = z.raw.end - for { - c := z.readByte() - if z.err != nil { - z.pendingAttr[0].end = z.raw.end - return - } - switch c { - case ' ', '\n', '\r', '\t', '\f', '/': - z.pendingAttr[0].end = z.raw.end - 1 - return - case '=': - if z.pendingAttr[0].start+1 == z.raw.end { - // WHATWG 13.2.5.32, if we see an equals sign before the attribute name - // begins, we treat it as a character in the attribute name and continue. - continue - } - fallthrough - case '>': - z.raw.end-- - z.pendingAttr[0].end = z.raw.end - return - } - } -} - -// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>". -func (z *Tokenizer) readTagAttrVal() { - z.pendingAttr[1].start = z.raw.end - z.pendingAttr[1].end = z.raw.end - if z.skipWhiteSpace(); z.err != nil { - return - } - c := z.readByte() - if z.err != nil { - return - } - if c != '=' { - z.raw.end-- - return - } - if z.skipWhiteSpace(); z.err != nil { - return - } - quote := z.readByte() - if z.err != nil { - return - } - switch quote { - case '>': - z.raw.end-- - return - - case '\'', '"': - z.pendingAttr[1].start = z.raw.end - for { - c := z.readByte() - if z.err != nil { - z.pendingAttr[1].end = z.raw.end - return - } - if c == quote { - z.pendingAttr[1].end = z.raw.end - 1 - return - } - } - - default: - z.pendingAttr[1].start = z.raw.end - 1 - for { - c := z.readByte() - if z.err != nil { - z.pendingAttr[1].end = z.raw.end - return - } - switch c { - case ' ', '\n', '\r', '\t', '\f': - z.pendingAttr[1].end = z.raw.end - 1 - return - case '>': - z.raw.end-- - z.pendingAttr[1].end = z.raw.end - return - } - } - } -} - -// Next scans the next token and returns its type. -func (z *Tokenizer) Next() TokenType { - z.raw.start = z.raw.end - z.data.start = z.raw.end - z.data.end = z.raw.end - if z.err != nil { - z.tt = ErrorToken - return z.tt - } - if z.rawTag != "" { - if z.rawTag == "plaintext" { - // Read everything up to EOF. 
- for z.err == nil { - z.readByte() - } - z.data.end = z.raw.end - z.textIsRaw = true - } else { - z.readRawOrRCDATA() - } - if z.data.end > z.data.start { - z.tt = TextToken - z.convertNUL = true - return z.tt - } - } - z.textIsRaw = false - z.convertNUL = false - -loop: - for { - c := z.readByte() - if z.err != nil { - break loop - } - if c != '<' { - continue loop - } - - // Check if the '<' we have just read is part of a tag, comment - // or doctype. If not, it's part of the accumulated text token. - c = z.readByte() - if z.err != nil { - break loop - } - var tokenType TokenType - switch { - case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z': - tokenType = StartTagToken - case c == '/': - tokenType = EndTagToken - case c == '!' || c == '?': - // We use CommentToken to mean any of "<!--actual comments-->", - // "<!DOCTYPE declarations>" and "<?xml processing instructions?>". - tokenType = CommentToken - default: - // Reconsume the current character. - z.raw.end-- - continue - } - - // We have a non-text token, but we might have accumulated some text - // before that. If so, we return the text first, and return the non- - // text token on the subsequent call to Next. - if x := z.raw.end - len("<a"); z.raw.start < x { - z.raw.end = x - z.data.end = x - z.tt = TextToken - return z.tt - } - switch tokenType { - case StartTagToken: - z.tt = z.readStartTag() - return z.tt - case EndTagToken: - c = z.readByte() - if z.err != nil { - break loop - } - if c == '>' { - // "</>" does not generate a token at all. Generate an empty comment - // to allow passthrough clients to pick up the data using Raw. - // Reset the tokenizer state and start again. - z.tt = CommentToken - return z.tt - } - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { - z.readTag(false) - if z.err != nil { - z.tt = ErrorToken - } else { - z.tt = EndTagToken - } - return z.tt - } - z.raw.end-- - z.readUntilCloseAngle() - z.tt = CommentToken - return z.tt - case CommentToken: - if c == '!' 
{ - z.tt = z.readMarkupDeclaration() - return z.tt - } - z.raw.end-- - z.readUntilCloseAngle() - z.tt = CommentToken - return z.tt - } - } - if z.raw.start < z.raw.end { - z.data.end = z.raw.end - z.tt = TextToken - return z.tt - } - z.tt = ErrorToken - return z.tt -} - -// Raw returns the unmodified text of the current token. Calling Next, Token, -// Text, TagName or TagAttr may change the contents of the returned slice. -// -// The token stream's raw bytes partition the byte stream (up until an -// ErrorToken). There are no overlaps or gaps between two consecutive token's -// raw bytes. One implication is that the byte offset of the current token is -// the sum of the lengths of all previous tokens' raw bytes. -func (z *Tokenizer) Raw() []byte { - return z.buf[z.raw.start:z.raw.end] -} - -// convertNewlines converts "\r" and "\r\n" in s to "\n". -// The conversion happens in place, but the resulting slice may be shorter. -func convertNewlines(s []byte) []byte { - for i, c := range s { - if c != '\r' { - continue - } - - src := i + 1 - if src >= len(s) || s[src] != '\n' { - s[i] = '\n' - continue - } - - dst := i - for src < len(s) { - if s[src] == '\r' { - if src+1 < len(s) && s[src+1] == '\n' { - src++ - } - s[dst] = '\n' - } else { - s[dst] = s[src] - } - src++ - dst++ - } - return s[:dst] - } - return s -} - -var ( - nul = []byte("\x00") - replacement = []byte("\ufffd") -) - -// Text returns the unescaped text of a text, comment or doctype token. The -// contents of the returned slice may change on the next call to Next. 
-func (z *Tokenizer) Text() []byte { - switch z.tt { - case TextToken, CommentToken, DoctypeToken: - s := z.buf[z.data.start:z.data.end] - z.data.start = z.raw.end - z.data.end = z.raw.end - s = convertNewlines(s) - if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) { - s = bytes.Replace(s, nul, replacement, -1) - } - if !z.textIsRaw { - s = unescape(s, false) - } - return s - } - return nil -} - -// TagName returns the lower-cased name of a tag token (the `img` out of -// `<IMG SRC="foo">`) and whether the tag has attributes. -// The contents of the returned slice may change on the next call to Next. -func (z *Tokenizer) TagName() (name []byte, hasAttr bool) { - if z.data.start < z.data.end { - switch z.tt { - case StartTagToken, EndTagToken, SelfClosingTagToken: - s := z.buf[z.data.start:z.data.end] - z.data.start = z.raw.end - z.data.end = z.raw.end - return lower(s), z.nAttrReturned < len(z.attr) - } - } - return nil, false -} - -// TagAttr returns the lower-cased key and unescaped value of the next unparsed -// attribute for the current tag token and whether there are more attributes. -// The contents of the returned slices may change on the next call to Next. -func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) { - if z.nAttrReturned < len(z.attr) { - switch z.tt { - case StartTagToken, SelfClosingTagToken: - x := z.attr[z.nAttrReturned] - z.nAttrReturned++ - key = z.buf[x[0].start:x[0].end] - val = z.buf[x[1].start:x[1].end] - return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr) - } - } - return nil, nil, false -} - -// Token returns the current Token. The result's Data and Attr values remain -// valid after subsequent Next calls. 
-func (z *Tokenizer) Token() Token { - t := Token{Type: z.tt} - switch z.tt { - case TextToken, CommentToken, DoctypeToken: - t.Data = string(z.Text()) - case StartTagToken, SelfClosingTagToken, EndTagToken: - name, moreAttr := z.TagName() - for moreAttr { - var key, val []byte - key, val, moreAttr = z.TagAttr() - t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)}) - } - if a := atom.Lookup(name); a != 0 { - t.DataAtom, t.Data = a, a.String() - } else { - t.DataAtom, t.Data = 0, string(name) - } - } - return t -} - -// SetMaxBuf sets a limit on the amount of data buffered during tokenization. -// A value of 0 means unlimited. -func (z *Tokenizer) SetMaxBuf(n int) { - z.maxBuf = n -} - -// NewTokenizer returns a new HTML Tokenizer for the given Reader. -// The input is assumed to be UTF-8 encoded. -func NewTokenizer(r io.Reader) *Tokenizer { - return NewTokenizerFragment(r, "") -} - -// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for -// tokenizing an existing element's InnerHTML fragment. contextTag is that -// element's tag, such as "div" or "iframe". -// -// For example, how the InnerHTML "a<b" is tokenized depends on whether it is -// for a <p> tag or a <script> tag. -// -// The input is assumed to be UTF-8 encoded. 
-func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer { - z := &Tokenizer{ - r: r, - buf: make([]byte, 0, 4096), - } - if contextTag != "" { - switch s := strings.ToLower(contextTag); s { - case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp": - z.rawTag = s - } - } - return z -} diff --git a/vendor/golang.org/x/net/publicsuffix/data/children b/vendor/golang.org/x/net/publicsuffix/data/children deleted file mode 100644 index 1038c561ade4683e91b37e8298fc5ead7c306779..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2876 zcmWO8`CAib0svrUzRb+`nSk6zK|{VtNTRiJd7yyG8JU16mRliLD^_b=t;%||>na8l zP!yF%fdB@t+Ui#Avu#}!)CeZrS5%ZMplz*Iv9&5~*B{>h;dOCwadCeq;GIS9q`Z^& z4zVS!huE^`0kP%QL!#hTKe0dV5pkd}k|?!C6Nl&oqRgryj?$?_d0`G=rZt2)emhZX z-9en7jfADpL|Ci`i8}faai*}0IAc9YoTX2R&&HotpS7M5e;NNJao+kBaiQ><_=2^8 zxJch1F2>u5ONGtEC2I%qt+kW*&U&Bt!TN}}690_2YJE;zx4sqEGeBIQz$5DSQiP46 z346kOMDyNYqMeyTbTF|*XM&RGx}8MyGpQt*u$=@DVW1RXU~nZTDBVa8s31KJv7}dH zBIym6lHSS`(z|gP>8ng7eGQqUP?<$eHK@s{jhpc_xP=T*Zp8tHe~|%=yGSwoHz`r> z)<_JcSPBnfsj`ez7!GR`jVH+&@Dv%`cn*ia+c-qoh(iobI27K&p-MXrH8hi<jV)xD zvXu-owBy;z4m?-cjpxA!I9xeEh8sr6@WxR*U-<|xQ2t0RF#LieluyYB!*d)7UyzZF zFUhFJS7f5{Ety!Pz+BOE%r(7_RZC`JRZ%dxd`Sqoyl4rzyeW}PMI4#BL`7aMN+K`c z>A?+&y|}@M@D2V1e1j9<8#Y&blbeWd+C1<rz8ALgy|L9O#8%rBe4FowZyWt_qj4H; z;$z&zNpO=*itlhTe8;H3HvV*MGrosyw)e5!HVZd%p}5&N8@Cwe;8y;8+{#7ZR%0Y? 
z<455(ZV_&?EynF!3~sl@;SPQR?&K?Rr%{DFZOORHwiI`Bskqzt0q(Y?;T|p>_t-LV zFPDvbjVnn9e-(BZ^RUCF!FO$1e2@DG-!tapd$u+BKKC)cZ(N7__@9t{+^1xpZ3BK_ z+^BiTZN?961>`V)8y?}C@Ca9iM~sK@DE|l^HJ0O1+cErze;hwDR^UgrD*Tvl#*evb z^0Bc7|IF3mpN(JPpKV{`C;ao|6Yc_jV*C$&V*3XF!oP@r;V$8){LA<$_h0g<@jLPv z|9kS8?F#uTca40(uP4WhjpT3q7V>v~7x}x*LB6)#C*N?7@EhZg<T(E)JZ}6IkK110 zKe;h5k^dVzQ569u+9QFxU?J<STEx1W7PId5Xy74;0lunO*4LB?eC=6ak{}yQ7UX~_ zDjo2*=L3;yBM_N(Xhrs&fK`1CSW_{}+Dm{$upb1P4uC-WF`y6}2MU!1%n;Op8LD$& zhUqIX)ATi%W&f595?lj8s_Sf!sU8Fi8bGk>CI~j&0$~Cx8>VVw!|d%~wxAQtHFbe- z!9%dv<OI>KG>A@uAl4OuxMFt@*X#=tTqgl#u|G&m!hma509ElUken0(Qe4A9O41^* zym>KL(aeFg;#82<oCQ|8W`kADn}E7_8&EeF0&UV>@KJFwSYKQPHo9H~8<Xr{V{tRs zl+*$?C$)nuu6IF!Yab{mc7i>wqhMdMH`rIA02L+E*-E#6u$9T1*vgX6*vgj8Y?a#< zwkmlmTUAoPR<-;SnBBGkbMki9T(X0$F4@V}xb0$VN_Mj~Ero1t@?N&Kq=>C;*#|7i zMsTvE6r3(O#&d6}m3X+vNIX(vB_0RjBpz+?JkPcSo_8C^Qyoa<WtN8U@=8DCedw`^ zx2d$8_fL-w-j4RiyyDVVykpDf@J@8(^Uihb=Uw!?$m=<1=M8yf@t$@Xcw>it;Ej8= z@c!=nmEv{&O$k<b=>=uMdO=r+-qkyl^6m<wrj(^pc*Pn@(N#nRcb%f<mEESe6+=`? 
z_e}jVpBO6Za4ePWvqqZz!CFe&y^dOact5q?r<N)>e1$6X8Kq1|gj8iuh`!2qi@qvt zD`oL5pw9FhpuXujMSXw7MqTasiE8$JOSPqkQ9bF4sRu_hsnJQBsh=j5Q_m-z);~|b zNsS%7MUC~gP`~%KQhyx1PmT8uQGfQ1QGchuqUqj0X(q#uM#8D|1fhf$2<3r-j3C<0 z5ll}ME}$otN6_w$DB80;hV~LB(q0Y~?JZnNPjaNtLSZgFIU|qubLeURjP<m?V*@P` zZluK-+iC3BL$g97&6b;JiKB#`Rep$`<+vIcB)mokWn8C&%IoP6M*|(^u+ekMJL$RQ z_vm?!hje(xFdbe#LeCeD(hD4q=?KR&I#M`BM>$^7w=>?-ckWN6n~%?+Tm9zH?b#7@ zXLcOjdpwDD_^k?bWakAsj;rarej55OKV9Ho*?$E7b^JBslKn>JQb8~-eI!HV02%2| z$$&qUfeL|)m*d9pDm-MoK2I5)<0Yf}Cd-%{KN(XoRR;a1$zV<Z11rQb*oPV9&&p7r zgqe^dWB3&chTr!-<CZgv@$?U8Jad$cPu~(on3K-5<fxezb2!t|KVQ}=TEMjCM#x&t zkxXlUl&sCXkZJ3WX4*wDvi96qrrjJT>k!2=9l1)T!@NY+X-;H1`;(b2(Nd->H-+gk zFOzlkFK4<%sZ4k73Z~oq0n^=|ChN&fXL`(;OizCn(<{oB_2%X<z2=oNhiH||k(<Zd z6RDZ|qSZ{lXf4y9yPX*{?_h><3z;Ev5i^{-j~O->Ll;qr{M`oRE(3&|2mo>-j|YhX z3QnwM<nDP<a`)=Ra`%B~xko^Z+#@d*dQ`_j@4R^ETfGeW4rD>$KsKCQt%ZKoA40!@ zPoRImdMK*?6sq!e!VGaR%uwgSj6pTb5^G_WdNo{GlMmJ6&2qJRH`I#vKz)q~>IaX& zj|Pvz{2DX-c<>}#J9r+h6JLbu)R*7}@nyJS@Fv`(zAfKW(++pmbjWuOzJZ^M-@-4% z<MJ;wE^yZn0e9E(VPUN&+&eS{7GV)A(nw&@kQDC2GHAkbcyuU8UXF8Nxh4-D)2Lx3 z&X-r=O|WWcD>T<`gC~Y|LJQsx>of=8Da~Pc23Nu}8Vfv&>)<(j8lJ}&;Q65|@XF9N z_&+=buWDYxtF^D;b^Hci*Sf%ZmVoucJlMc8u;B!R4Z{=Q4VDjYX$7!}^?^-V3AAaY zuw{5AY#&}A?_d*PhgJzYhL=ExmV=HHYUmi&z`Lv#KG5pmL+xj9h%JCaS_2%>7Qr{{ zVK}aRj7au5;yIy$(s?N;i;seG`Xbsn2|=A7nqm<nKiwMSFIk8Dou8tqmW^ndZZi@| z3XsUT9R*lEM`GPBge7|vSZ6@2qzJK26Ovf=BdO#dk~+&!pyeo%>5d_}q!P)U)ktAE zfu`$Dq8XAiXom9~de3qm&D4E^f+Uwwkn=hUw%kA=Ix7m5G@($Z9fj#y(QHXOn(gdD zb1c1RuI?V1CwYM8IR{X<Wf;xZjiLpTAJGEm6BJ>2h9V^|P^9xEiqgG83nj17LgzSI zWceE{){`h&N}=cxh+^vaC|2)=V(UCmoZb_~kNBVjsUK2G{ZXPc043_>XsKR-mexh0 zl#wX3LK=h8q$wy(pMlawGEurT8)fQqP?lbgvPbgKO8t7YYUDGd)^9_ax;;oI-G_Ag Z1B%rnr6^x|0)0GUL2F0Oqfew4{}2BS2s{7) diff --git a/vendor/golang.org/x/net/publicsuffix/data/nodes b/vendor/golang.org/x/net/publicsuffix/data/nodes deleted file mode 100644 index 34751cd5b9d1b14c3a0977e4fa4b04db058b241d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 48280 
zcmaH!2Xs``*2m9GGBc?V0-=T?iYSVxfMUZtbEnr#dJh5?RIDhrr;tup#4a`z6&1yT zVja>GniRXJ*q+b+?A`CTPk2UM?|qB4_Bv&k|K4Yxa?iaJlqxv8RVnAhWTmPq>`ImF zN)ke;Jk+67WgWf#E9&VT(xg;@k)o1}309S~=N5-jWrG7s#s9Tf$P%SoyXurOzi3d& z&dgd$%Mr_KpsoB-+7jo`R#MffZ0^%UpSwZXT8e4&-eoFP(_>e5BdtZL;zFNNZg+>W zC+5183OeXD9`FJ(+N@M~JE+WfvT_(F&|9<v1kQe)kb!iRUffOZX(=eU1&_R$jdUE~ zRgQ!W&7LRGQZTN+QjyN>z}@18xvrhIXFN((+_ea?XCXxs@vUeMvff|nREqPdT1qMS z;lD4aQWBrFf_@KT@vrEpXirtCiE&_Y9)w#3Uc`ch4Pd#wUZriGhz(;LF1T)Yt8^n5 z7{%vM+IvZxQogf$REA--!jgj1CHHly{>Euk12<4D8il^1QBB0}Hygy+Np(U8FfK)D zX&IQkA)m@NhNIMc*;Lt^Z*wVCz06btjp?=U9ow!3Cd$y~DM3|W8<<@^=yOl%M<lm_ zz&s;afP*ix8P8)IL%6VzI2ZiXrBvZRDH(IR)S%Lf(m-H<pz5?nHK_9_Ojf>wSWOzy zs5uBgmG9KiTVyKho`_ZD7^i|D_#S~UP9-FjO<1eC9tHkYR7<?|AeC`*53Eyzw@h}a zA+GCc)sU98Xm+jbP(uv`a9=A53TKL;&ydL5n=HhJ(xFLeXver#HOweP!njxrT}%?j ze@UnG63oV>0c<qZt6_<i!g6O4WR0nY8~2Kh8y2hKiRUTh=Xy{-G>GQ@ZG>dF@I6n- zcm=!0D{kU)l!MZb@Hvwh0OvR_<Rti<H;`-2uS_+<`Kgr%okhUHI|-s|DkYaBgUgSs z>&z52%2?Q<MztIXpX*wW8tuB)uSRzs17Ezcm9{&;95~&o#<=dTRbyJStm=T4b;7(Z zNsTo|QHss3V`NTC#^iKL?wbJ_?%o2l!>Hz64m3k%a&$dNJtuS#Y*&*IW~g}?Hfpwj zCH9m}s>pixg3r4bmhk3eHFj6Ho`AXOaHqRrE)jjBlE8Njs@y?H{ALWfZZH{??_eo@ zqE#K3nBY<ewVv1l^TBrX^@AlZFIkN<t6fTkK1D&`McSODE;TN(48G_ZG@GkCu{O(x zmdgQT{0qDOdmG@p3qW~c1~2bk^c7!bSK||%h&gu>tNdSSb6tW&`AhIs79|UzS~V9y zRdfC75F_AHha?U`!e5D@!tDSr{?J3#UE0W^HJ@tv%oKH~aY(zG;<DA_+v!beN@9Uk zO*IBv)ik33;I7+j>iAND=Gy2boDTul^^#8=zxgJdDSVr%?~`VAy6cy2oH>cA=>$+w z0+8lefHW_{xOvT1$UQOu>xJcWa>H$<<hE{Cr?*MMxkuxIyW9cX5{Ub8#N3a=QoRde zEUi;#xHed6`&v4_?g5LPG53t}K@J6<=O{w#IhtP2(Vgmyb`R)1o}ijjFt<g`>3G|v z&h+%JRcE%uq~nqX$enHKY}eCNi_dANn$b!~uB2p~0U-Yz07YB9jODWs;}ZO>sc_@l zg>~v2!vIV8&3eNAJ-y*K(H9=jNMhcbg2ZC@%AO(yl|43A$D=4kciYLFH}TCs!L82k zn&4FzxK^6<UX0z+(_txiyG>naywF5;zTY8)m{xamVLYE+qpgkDdti3eyVN}6Gf3jG zT9V<NR&|kK1CX;InOy5e)^!+sk>yycxh;umR|={|c^IwLrYwS#iX+1JoEIVzMeC3? 
z4mZ_BovUiq#pbu|bc9;fd?Tj?%{L)de6mf=-;A|OTDLAzmpXH;>N4XPdV^QtrLzt# z!HMWAm0T;`&hQj367mmvW9i+>nm7>V_><yHyk1>yOrToQ)vB)Wmv*Qt5~(ga&h1kF zFzj^r9_>+A#>!jORb{!2czLFu%(kJvygy@JFdpBWx#U4!7Qrr&kts_~Rad<^0pJlG zgZt`mbzrhu;9{he>&{M$cOx766lQm318q+d7te0mJg;=B1)GaNP%x_=eZ!^WYxulJ zBI})kQtvSiRU67=O1eK!rg~N?<U;to7Yob9sQ3P(71Ge8YC9IxD(kkHwN&o{eR#R_ zK3EUgDW!LsAYaz0y2#Pu^eFr_Mj>mALX|O!s1^L&MGVfwkZ%~qeaHE*TSTdR9Zvh^ zn5r%@A2H87VR@BQ@gJBZ1oi%Z0kZf8KOM5(2cE;4^KO&jIRf=X(*Yhl)sF1KI;>p? zNY793MP|U~`N@Xt*<ki{)dT$N1{8E5>pYp*y9Q7)E(f!BT)nDy52#i3Z3F65gOP5f zBONWqKx~9h0fuu4HhfRFsD{K?dfij%Rb$V>TGeC>K|$H^053ZpYi2)eRBgAbro<O? z7+<y{JE9IOn}FeTQFX1QqxfC;Tx;D7<AWHAuQye*QA0=UWe0#Rp~HI#o(Hng?0Oxf zkufQ%d24k&fT~dymPN~4N{4ZCt6G$|vlayJx2Q#lv1lo*CF0fNAyp3oJe+Am%cmHx z`qifv8)H3Gudh{$J8^%B^Tif?d!G<HUxXz-sSUCnma-cOVt5HAqc^vxC9U^Z$tFSI zddo-HR}hzSC)FDEZWX^t#BS<ROU+9$>3uVe?951p^l!%fHT1dybcCk3k?`Sy_i_f$ zcQH094l~u##1vfclSs-~)uNV#c7Uq#8Q~j@AK~Hfx%V!jG?KW4CD>tqH>9!wa;}j2 zF14)nuw=E|d<JIILvX^4fHXZ`N-KRB{|P>mf6H6fxNzo;dbPqCw5k<}<@i=`J@`uQ z^T7NJV#UKyP*n(U=ZT`~HB((}<h#^02@~1!Fdb#9fonE5sFj9tscVfiI^r*4%_zmo z{Ee{WZ|qU4j00*xKc^Mu1*E*W*soT#PNUa200o}vB*u3I(eQqNZ{{Yiy3V<VHuuNd z=p9B3JZGk>>k?M9c+<Po?Z!j3>h{DzRD&P1t2>N-lq%+c*?87eYQ|{4x+9U}A_eYA zChW(d%9GiFA5Tj4h9tGdC~m<)c@Bv9Y>>((a_wG$ab_!%{e1LA&aZ=9RHxP?R!}Wi zfoyR<uoT-`>A1{PcY6<`!+RLx8(Gwb@ni73>_g)0`;1cQ>B%aQu)|`y(@}r^au8gz z0?b=m)#mMFyR|i1t6Gc$T#U^3C@or!4RfxEjWKqRx*)O39mIZKn`+tIU{$ToVK5hY zz#O}hj`BfX)!Na9jj|g+5P6|awHYO~s_oUc&=PsWP3~4NQti=>TGier0V`hEt~#PU zF4bvxQB^VpEoDPxl{%=MF!>k&qtmWBTb@CmnMH=>kH#Z!Im9?W2^X>&X&Z`|zd~$S zI#gHU?poDt?y!P6mo{%j1JEvQqH5xT=SQj)vJ-n6D8*TeyAyk0E?x;f@B5lWh)^Xd zP-tT!y01;f>2F~!lEt!cS3SLwlde;2swX@iiEzxLdRh;lBd^A*ws_>R!2LDGi|(kS zR0WH>$^$vYiwzH@{OjFnOY8Bi`0+~zs=jE)nS>3})5ZW~AQsA4s<uX|YSq@wWb8KM z_!gKSP7;!=wj~tR=;6#p=OpZ!w|mugXQowcPefbP4#N%t|C$CoKMbeK$0h^Zi*dtF z)pbst+R-y3$mq-79eflR&Y8e)&LqUH5`3%qhD0)FQ!V=h8~&C?s>@9EqIqI1!}Cgt zdNFahRqZsc#E|=pI<@P?ATCtMEQ^1(2rUse<Vs|X>yp%N-wjlq^(kt1_v<d=_YS1s 
zX(ZxSn2k*))pTTQcEV@Q1yF!T-#tMV?mdY&VGbQlTlpd;#O9kZS^27|UUJ@Bt6oZ! zyVT3BW@H0j;GlmS3If;G5&IYt&W}t+I*pEk=ZQwlMjpgGxEd=+ffU(czPldv^OzMe z)I-|U%ZZ7#q~ozx^@`_GNTRX@kd>|KRsTMW`}ZZmx5`wnIm1@<+74M{BCptp<Q<Lb z^)RXYdSXqjdLzCb(lXhl-ZUPmRc|J8QEFU(teM)V_PVwtlP}Z2Qoa}TL8qzSN<8O+ zWi-}2ht#RJjnM$AUQX6IpYW+qjM0ot`PU9uSpPn0KMAqQ>p@?6{bGXsvrT>Kx)@or z)Ks4)4xr?E9c%tCP4!tK$;wjj2__33N1|%Ahm^k)%+9Nbf%Enb^||qCt@=DM*o8^i zor9}a5|`!ln#Y>t+FD)mC~M0NE$VmUKJ-;S06ybBY?Q1>QonCL8(H(@B#ScMYN2BQ zy;XORNa1OX*!Y#$*YxWU!W1vxiyzLH>n#?evKB07wOcHSwHR`5MxSAAhrDW9tch2> zV3C^>W1X8U-fp)fRbAd_NlM6iUsZro{|7#ht^{-53t}i?T2fy;t<I8W<XSChEj*<v zBR7?ftsc}rL^bbDKOvsbU`hAn(N^jq(Vc_P7kDe#lAbsaK+Y!9lHq%RlJ9{=W$k>r z){@b=fvW#5Jc`SfAC6)?e6x+wzd)08OJ?3IJJop*&%R``WQy04`Qq6~__C02|D=f^ zD0V}3qagpA21~!vsa8wBgx6~6pYV#RS@cF`g<yFz1(U_emMrr`mnExZeG8#G1uc1B zpg!;4?O;jsSq8+}YnA^cw`D*g3zo7PJc^&_VYtUPQ1Z1~vYm}$JPw~bPPWI3>LI1r zh|9L^T*+n_pFv#WGg%qpXC(<qvkY_|=CTazzRPME<UT6NGU&xd)W_GhT5`M_td^WT zbENu38wze@pxm<gSANk!b!vlUura_&Defi^DJYFJGEgOJ8wt+cl-%E7Bk~5pDL4Qa z(bH`(ceV>bL5(c5#_#kx$CyIU;v9om-h=cOK8lv;gXF#UEco)~_E-idT(lKl)Jd?1 z!xzd$B9t3o8Z7gmsss0ptR%}2=XywDY%K`J+bu(5-_jeEqlWixH0S^9whT?YPH*)Y z$(CW8FKMBa(*U45lP$Tf*{zn`#C<MQ-3Kb~cO90I#(6N8Z|=g7jH;{H3yg#9cv;g1 z-<`<1lJUcp2Q=4Nn(QDD!(egGBL?n`h#CKGr8h{)v#-rEGVu{Mg1=y+aI$F`WgI3Z zIh!dL>nZmoj8@<%y!0I%hD;S}KQuD%H#;n&Hec3c8C`yMn`MkqWwne+9D;1PgWf8h zk5tr8$y@2itz(mk>8%|YnrO2e&=pKU%Vc2q=e0wwuVZ9bhaiag?yj?p^=xuk#wN0B zLC{WM{o<Qn7Eu2>N}*%gQ2#CtmbBU|2X4NFs{2PgFIns*1+2-W%BLxogN%MH0BUuk z<*RynZ4S#pi5z-e6;{hQV;^vheI`rz3G}+<2Cd}zcFVX{Ih^Dl2sHC$6a?l|ihkZ; zIoLImj=+n_mV*-+lxoJ|pmUlXYe`+O%m+cib*PHG=dp};zbjMYsWzNmipkKVR;;zu zS;i+?a3L@QrNJILoB=D=2AGyZjKi#!LlPD=mp=<j**d}=C~*VhG*rb>7a_5pFcn|z zr{nNcWcPMj4vlZIu_Ic+QZ~B-@&K9xRW{3^i3plYex{nY5`6LM4$B0;JlVv%>KX2H znjp*UmI<3*hR<E?MRvK(a+tHA#d28dZ!KuKrO|S@`CF&u@V3-i(DT%$oO|GlcB7zX zIwsw0MXIWsEODRYf#nve&fAiNBwHpL<Is}-ORHsK;$atdht*k*FzfIrc44yRh!^i? 
zu9_a=QtkyyO;ZO5o=&z*GDcy|HOOU|lt`iEUWi?voW3f*f-mT8Bg6*Pc)Az9dpilq zuPAk{BM{CPY?dQkBM>V;onEsNS@RK$n~!)cN489IS&lM|76xjz$8uE5beH95V?eEC zvVSh<V<U-GbasPfa_0lMP~oI4Zx-s~chKQJ*+$x)h;dI8q~6)dmMMm%)-rX^%MDbQ z!dF$p$do_UC}bH5CI^J%<5nrE%4W7&jxpj^Lb6k|497-(HcIo^A&zM|4v@apb(Uj| zF)qtA*HN{WX`64St$YkD6^EOa<BS@s<+wz$@O=yd-$@d`_tE0!%%CFqb(WJ1i`7!@ z{G`@WVZ`7wCfDNXIiyPT>3SS|FvU`_^TJw7Wul;k@s%~({7TX`36l3(2C8P&S*rZ0 zwU(+xKNLh|t;=_|(>ugu`Yp3ll>=^IQ5~g1+VV@K?VMz+_4A_OQA+ujVJ&}Q&{7=< zqs4obhw3ehAvdEc{{iWJuo1EzX6ItNrMhLbmGM;_tQPGyMm3vo27Iu@-hn0djswlr z4MeTQ2j*L;nzwQTUL_lS)u?t$jj@o9s*jo=9q<`uo291Z&RUG;(B^%+-ZH}*bEEXF zEtvca^pVFpr39A9MoO+*TUiP|<MAuMjc6ES=y3mlDyI*UzoX7FqooLE@_#|LfCoxd z6M)58XoD0vAh!Xxd<Gry_1JJ9z684~C<RVTX90LVjm4;#YPgnabSmp<-glJpE@Bw- zuUx`h4KV<vPleF3J7_tj=7X&mN+oIq!+F<Ha40MVhr?VDyap2OffQ_klo5!6^Or&{ z5thrNkSj&DQJ5cCYB^>1lcME?E{MEsDR?&x^1ihFGo4jn0_qEo*$H`-R4IHNvBD3e z<8z`>_$$>SM=@kL){4rJEvlBbO|TSg>xO)f(qb<uUmRZoIZeoUs4Bh~EX9pT6yJnI z@vS|OwbCmQDt;~#@@fR~b{6D4Y5P1(AQYvNN$HT&mqX;GK*`0ZFU_K@bT|)`rK3U+ ziAL!(jF+Agu*@ty3%=5GFj;y&9+j?;>fO96Ds7iimq=^@OW7bWmpRc=wghWsD>EQ# z5G#9x{j%(FOqM;H2T8;r9jGeXvJ|qNu$S%H1=&Ze%0B7``S%tf!t$*!|0r$0T+Ilo zYazBcBo*26jOCC47%CrH067x%<x{pojukQ;Xytyilt<84UXTtsHw@_nZuu4|Z5Of= zSIZ@7%70)iD^l^JA{ERP<3g61Z%l-_A`iZbQ<hj}Rm_ID;`|iIyd{wO9!OIVBG;M~ z%a=lK69sn)%Wi4=70s2SfKfRy1PLu+GQCe*)ras^eat{r|AvB^BZfexz6z;Z1i1;m zn!5%;?uEHVPD?e93VA}RFW_oTD}kulCT(wQgS-owF*t-DXPA~*d(9-KUyRuk-05Mm z-^FAq_|k5f?c*U+<^6~sm7ldyiZ?UM9%6#!?{rvZw>`(~4nLb@In|gBUo6KCIXD?r zi`__k*lIZ~+EZ&eEwPri(1<3>>8?9nD3CMtaGp(-lgnUzm)84as2_t2Iqr{e5M5)D z37KG3Dy^Fc=@$xe@%DFS<44*G?-@*re#+2BrV|qPX&@~=6X5x)K~+-yBJ0(lfw=Gx z9f5~@0D1*4J+(dLWMwx~=(bccC!c`n_k#t$(BbkD67y_e#9ksU`V~&>7e=AR!yvnE z>m*F0GAY^W;ad@e+<{o{tBN70cRzw5Pd2^&30^>|HgOu?ToFL#zebzB?e>Hi54q#+ zy{Fc<(*>t3Cc4Wzuq%tX@dc{fho+&=AeHrfVsII7-B+SYUt$I1NL+O@EEOG;0vDjq zWhHc#Pi(<+d2laGrZ@gz0KU_E(6W>cL*A#9#jaybJdCAOKS;Ra@D-g5i|KEI<(WFL zbf7AhpiQ5Az4EB#m!qUU@CBMts*hmNT_7!dnb7GyIl2%9o)qF|j3MQFS9D2%KaIXT 
z|LTF)gRB#zHyLyaNJjLvxi(VO%%iPI`*TD0{z;$QGxTLsU;1c1*N+6pD(~(zi{)^M zuGp|lZJ<;@iW-kW@*W}e47t)XhEOs&CtB+tfJMC$R4V(|b|FL~<A!==y?!{+$lcMc zRL%=alp0xudiz`gkvoDBOlz@IeTd$SZwQ!e0Ub%)L0NB{jRN&TKcz<H5}c7&22`qb zb}M{4h}x*xi<C;fhDa(og5_RM5S7(J5C_(&H0_Lj@ufYVF%mI7KlDAOzH5oeVNq@? z{uGU>2^}~iw^lkXRVTSQ$z10kOa7Sz^9KNTK7^{k#jt37{U!BsmZXF4)nrPO>nNSo zt}+rbIql<GRi;EOv**Ze)h|u5F72{qkbjWWI*3WrP&)GLRHd@&Gf+CFMfKbIE`b;j zt0(2<fk<PqzfB|;=C&y70<OY(ODnGcz4<UKX2?|iH|sn26>gQanZ;v3Lazt;jFBle z^nkjnlPC?6anqUe93bO*rIF`1*C><3bO_4`I>c^P@E+o<p;_J#Lt;%{)eh)W^*<H( zs|nrz?A2qWPQ|Vog-JD`1!5O67UM}LN{59~=B2cGiDYl0_ocFSt8YN{Wl~>e8wa|O zXsp{GKW(i~R;NH8vyx}sq=T`c-ujQVzWLTC))z?~KmB;)lEZOM1IhZwf}GR#&Pp{n zdV;?<bk;@Rb}2Q~OhY~Igw;rAC(JTDuG2SD@(F2hLKaBr`GsWN4ht*DBucI`Xftc5 z2Btgdl^iz|WN<Sn-#dQ?OPfuIeKTZ~kaVf0NJ%*%$x=;TC{-bLsd7t&zReJa6Y`Lh zESgBwvQUTx(od?6&5)~v*cL*jQ~G;M|I1R<8#<D@H{tYjNj;XdNPfc($zF^LXErP4 z$RI)v*%vYgyP3|fdI{a+2E3GaRO!z#j00|@E%lBLf^#<cTr|?R-~F0mX3GD>qGZwf zpS=Z}Fkba*GI72gyZXb9sJv&7Z9u&ibB|_rSL_V3RXzzm^HV=1d1%$Xd1k<@*B9qp zR5{OQiuWa^_XU`HmzyPwzFu?krghRLv8sLykK(r?rn9Ny+(s6Kw<5GTQXoYvmhqoq zDZ3vmMk|3ZatN6FFa|zQ(*tHnJww(%{rFJW1GE05D%h38F8&@-^RY6hs+kbO#wZkA zL`=gc0n+%amA$7QM1Mcz+$SZ;tN2DdsyG?GYIhO}TI=YLS86&q#lxu17=>G<$5Sdf zCzFHHD_x)pgFat2pbB{nShK!Wjo7SLr01n~G$D3BNry)knZhj~^%Uc!?+B`{<C8#j zLmee~Z*SzZO6)1=5S!gp&C(?H(6cGIn&>G0XB~X!O7%`iMVpNn$hD?-3+VN$U+=GU z_}rlKonk^3K+0NatCYKI=MqTKiFJhOxF$7H&qniUdP^Rs%{UoiPLt|RNcBNPqs)^8 z=FbAySU_9;#5y&yU1H#zM29h$Hs@gC5<ba}jkh`k2F5+7Iq2O&ue>AZO@`oEbQp5M z9(e`XXgBC}Mil3EDr-XT>`$8@7=nb;Y(-Yz2<tWbYt)y0BBTV_qBG#r%bfWERnt+& z2;M*`{#7d-vTZv9CJHhzY5M6cx{>kV5|GfnkSZlc!I{wv`Orh#2jr>gCC$tm$v^WW zdP5JP%5^;?{xxEe#dW0eEF{Vvrpj#!TI4px$f3hu)=FExiOGLKf<09AmfD-FZFO|a zg5+PB1X<d)KX+Y6!KYWVp`WLz-253^Rc^N&%Z4lhm0nr(7kN61jSW`X%7hHXu78sl zm;J*yR@&^4!0Rqb^3p=TuizRS7;>Fu$j!E~SBTsayXB72h*L7g(&n6Nr6jvS?`@-8 z8yYQA9RN|XsjCxcQ!<&9z)%8135*nh(YuWa3?(p91%?tBDf^^NU?d3)B`}o0Py!=a zU?_p11cpUm=uJr-CLIDp2@EAL{^x3@%$EPBH(4!0fh>o5Ym+tS0If<Vf7X4H44X`) 
z`hVVumPb*g$K!9-ta2s#r=4>;e8v#YC3UZoYkE^EJoZ1b+L}9AOkM`j=LnZvm+O}_ z^0uXSfj3%&Mb0Hod4XbBTObl4Pk*WIgM_}XRR>mi+??{Rr9<x|mIAQY9_B!mH3jse zpCx0nW|ElpbGy|+mDd3%dn$Q9=%a45$cC#u(wp^P&ujZTPH?D$?K7Ly!5#0gmN-Tu ztIuW`<D>i059xMrqP?_EjW^B#M($PERaG$S6xBPEj&5&Y9|O2v!z@JvR%d7LZnY+f z>fu(1Yy?UIAxFR7ji6vWb_+h`*?I3g#CrF58MnT3tPIqlZ!PmF>yppyIG6?*adNks z;45xY6ZU@QCmK8&9Tqt@NQiA+>hL=bXJPCoN0N;3eom1CL(017#>Hx4io8YcKfhZ| zyvEl>b|(4M5&lX@$J<HDx?*)uS+9{BFI(LbHM!?Yp6+bhnUWc2wiDg8i-^!G42nLv zWpn3t*tTheePF*jb+~bVU2jQQ3UJUm9T$`&LoXR+{ZENw-lW2|fSS~~(5{Xul+hZs zqE#K$QfXI5x4uF&`cI-Qbv~zN$HEjy9mHChqK>qGTc?h^4Q8blU_&in<+2SVYC}I| zWCkw>sXj?3AC;tz7NvjZ6PC$;>(F9qSqsbtAazpfgp-)|&Yr%B(>oVrWiY0=Abc-6 zb+c@I?x#?oN8c;Z<cpsDr#`-QQNFEGy{84zNGbTdlpeD}vZXYIHY0_S{_rH_L&6?8 zldAt47YkV>y{_*ddcqpzv}qX5nY6iYwNjP6BK}0JI(C=ZN>!E--h@E1VbO03xJ;)s zf-1MVAl*n+-{I-2tBT`^<cuj+dV_RCmRQv>o+kt9n7u`dm38@34t1<?W-Ym~EopyD zN7A8&U%&%f|Mxq9v5i>S!wqVhjz;q90Fr1Nmvu0AP~HoZ(JWK4@3b`XC9)|;PI%g_ ztg~SmnT=R)i1oIJTh<17G?HEIxT_C^CG$p-L7#&3Oi}X4BgA^k4xo*^kXM(tzS2f% z56Lj3qlcV)z^YCdIh1u{G~b1*)KvgaT_wv1pEasM9Rt(}?YOG?JxMk913!Mq1OA{~ zR)O5awW?&vmrMvX4ns+4T%QaYF0<mPJyc)z;r^+_U~qE?Yj<uTm8YbU@mWY{e2(+c zr>=6SQp+-WE%$pNZ#O|cCD_SV0VAiIxn;?7kmHviJ6J}4@JIbo|HcYr2g4^TYw6Y* zxIgsr6jhe}cm@l=-D#@q^;41P|6>mxWl~DLy%~H#rley{7Y-`?$T^-;(il2)dMCZs zUs+dv5@Atzf%o0RUP}k_SY#~^QnK9R2bBZ2a@Ge?HJvuw*UZMW6KEUS%xDb?plZ~q zOJP}#AN}PHASr;2<TnUJ@^=v1F(P(B2(o+$<ew-=`DqIZPGU~m-j<H{Bg9!{LZ+j@ z9)ZukP{?l{RX*}Fk|uW<VuRcSV)SW{<nv%28DIcp^(lXO8Avm`v70qIk3m-3RYmFX zjJe&9mO-ntD6O*bG;!b-RE^k5Nbc^z?z~h=4<nW`sS`5U0huBtrw<}8EK|;uHhC$P za=uir5>n@bGz(emfLtwYE1Mv9NcABpJtpKSAEZ-CJyP20fb0<Rb|++?kWZQ*pZOqP zI3Qn1n|vyeYNeW*?t}D~>R>6!%cj&(osh9oJyNPun;`OBk$Sw8B0fl=kZK__h0Jz9 z&TfLp7fe}}G=}z2oX%atxFtVGTkdwUZs@oHbc|?Z**uR?$f#aSaAY6sKbokiv6Awa z!ZJucMe4T%l6DITtYp1%$PI4B#6^(f8J-M-fzK#PQ5Abe@t&#w(Os(2x1p1^rIN+V z$>Q9-sj6~wvZ!)v!l0z(Z>M)p7?9VaW#mJWK6~g$k(>O~19w6`Xa?W&C>?PmNt4z< z9;A0M$m#FoKz@ZUBfST*SW4^2oQ#d2?>}z`38LW39FjqT45TA-Jq~8Rv<(DlQOH2V 
z`i)!)xv~(_%-wLmmB{vcAclf(Ff{Z!@b%k?f_}e%s{i@)X0-!yz(-`*pyeUR@5`9` z(~uZ?1geH^L-VlDu3>e&7KsrT5Q7nqqblPCv}Ame2l+P=8Q;;7nM5@+)ep%)bLIpv z=dMkGJPk`$8kmRvt6No7edbqH+b$#anTuHFl4}=X@*mw)A7vO7ADyX`FBxCEL0j^* zX{tKjYG=7_&E&)HcjKxi?^70!?5|N^9g@bFf4_|j@LSXI=%0w$7A;XThDA_nU5)yo zD`DyXF0t>Aan=_fSOjENza|F$5hRjcM>gwH&}UtVTUiU?lLOz37Z>Ad*6k^fwM@T} zlbcy6t|IJ76VSo|P@NK)zeJs~dqoj0d`U-6S1QAE9O?(2f)>licr<uK5ZNtQOZu%p z__h<A-1jhKZDi3JwA0I_*dR1Jo^HUceaL2Yhls{3M$j@8L-tEiFx<6N&5V0t8Ts5+ zHM4O8<2GncD;;Y(P%5X*0YBhJwu58O%6?11vNDB^4^z~vg5zMZtOLQIeaq>PIb*q` z1sBdE*tTn4#Kv0M?3E0>zO6{Vf`QLC4;YRkftK622+ftGZT1{OGGcfa3)wRPB;KX! z7=x>h>LrkQFgxzWuHz|K9D87KybSu3abd)MB0?FPJ0V}BF)}<Zscd=2mwXT`*<+Bf z$jy7YC9Y<@)@Db-iXYkTkeXdH8J65nwxaI>_)_0Q^YEG5Kz}0W2lk+T*xF?>S}WA- zx4N3~P0nj+{b13Tah6YTHM@}DTz^bhomzEd4=aB(s7~!Z)sNHX3W66Uew%#Q-GRRJ zWHAo<)3RSBm9yWJs%&@JUoKat-ElNSoAZbmn%<(UwJS|^Vo`ICIw{b#NKG$$E2>W3 zd+v7S3_jLRxr*-UQRYjv%ay0-$f)wZG;eF~L4?ytYVSeBU$t|o3M9VkQNhiYpbEWp zctAy}uSr$WSK7K&-i#x`r_YHaQ<;q;&mDmAyNFfJLNYsN5^Xv2NUo7Df^<{~NJm|b z3q#s6h=T!N<`!NB^xs5vU>zNUJ|=28JzI&|3A7D+<0hOA4kN@#uOWNrD#&rXdm1H2 zgHd}}P}G7|DjwU^t@3YsW|1l^s83ZzFKt<*iW7AKb>g0GMr)W{xTK!8T%GjS>U61| z92iEH{K2A~F$(E0x}~G<4E$FX=U-L1q-tta|4YIxO5IxNUsZWT*85*z_(a0@|HAN# zY~cU38x$>l4u<|(4NEorUr<FvbM*h!9242t|BEkAwDcK|OEq6B{c8*gL{-6mfl(+D zMgK31Vv+5$QS#TSb%9*K4(hOxFH@445ZCgESG$sDwHt5T!U{Ld<dnw!nss4;NfJKK z5<ci7X7Vs*3i-U9xyno-shO8K%oLTwOd+XB(UD9IHHrzLMlm5sYVxH&nW#pQiE1F3 zs4}-P_j5=`rASAfQBbe0X@jg4@*Kq0=w{8C0kQr})$UDZVVEGLb198>bqm4T;4Zuz z+yxc^X}x^`n~3FoZnqY_!c0*kWZC?Tbytfij&I5)YiIW3DXdMOLy~W)hh=CZ_~d-m zU)~gEtwE`!nI%ZcO|<1r?CKl!%rD>@9V8@LmDZf%LcNktsx0yrz*-MccO?;s-=x~e z9*`mrQ#muNOxT}Ug0d#EXsHWfk%wTV56ALQz5n`2p1}3nFK3aIjzz5ZnO&BD{XD22 z_Vz#2w9+fj_WD6gAN>1R^h*n0j(dM^Uq_#WN-^n`=cA-)c&Tp}<R!>inz{Perv1IW zpF_*FO`oiOP1$=VXta<KyvI4KJ4i+-c?;0{kR;9wI^Lzuel3))=DL2Z$Nf+EvUP#H zpvZpQuI47BWdCO!OZX?K|1U4O{`3l>_W?$pdab`~Q|GiTW5{wIYE<Xu96_Ab2fMH> zznK~!@7@P&@RNUOoh)-tr!gM#y2~mDX6wEcn7rA?$P_Z5a%8tIYNC{}nvRs+3`O?W 
zZR*_m<=fSHW^S0aYiS#tLRvTu3@{C(G<=3gTuKt!zGv$lWMISgElP*~TdYlmFY9G` zt&_lFlRZ@5@#v$F?K`T22hou>wg;BU!aSJV=>N8t(#a7BXBsukw;jIU*y;Lpder%O zx6&q8LuxR;*H!%$&Sd1V@{drkSk|D=aSkMdjnukw7cAN3NMxT*?+`a%EDVWqaJ4=p z@e`uoj!5aeL=FR`lT}ZR!5fpnTt-G%jAS8bk(aE>{v{8_ix0%^pm}Umw(l9sl`~l= zZp&*{7wq^MllmpAzUI^V^zo?QbxT-UxEp~m3i3kLdfnPils?2g*TNs%>cS;oEMf$I z>0pw4!}1{SH2;)Cc6pB1AxV-O>jCn?-=FeX61Xohd*3ujmgt%C*Xyi5@kN)xqMzus zguYe#-%59jzG+F^B#`YFFZr^874tz8%yS4+pO{mw;QCDJYlmX_(zEYgUL)=Oz(TH1 zWC~qkd`rpNATf|R<J?oHF5Ucx3!dMT_D@7-e@eZoMAjG&DLRDU$b7r3Pa<5@x?e(H za{TSBrSmex^b8%D*{m+_6Vt$ToO{3h5F5g$Z({!U>(Rfd*Ebz~*I60)A@z^e>zOY7 z<r@}B<=%IdYRxug1y2}Pw&!_BGxDI<{p{!Ll-@8&^iD>4@Dvib=y0aEU|Cr8k4&|2 z@5yOMd}f7D1|`V>p9VhU{TLmxcIthrcjecMs$8D+z2WV<75c9clEu7lRQr-kK2gva zt8(rELBVDg8J#&*Elk~N`65{d*0~e(;a37&kG;mE8}Qob{rXbQCGT^thg8`(qYqov z!IB~F_gxcICG{s(`y9OAl9!Gyd1B5#j7J{#8BP1W>>Zw`F+T8&M(*o;OnbjvTgQ#o z-V{}N{~~I+4dRD>_b!tpcM=2eC%5#(`RlSCm(5FGE?Z+Ju|FA!KiqH3l^@Fu;j-($ zYti}qN80KT>#f82I>z-Lr6+dZ-E}Xt(f{d9+Q7K!UFh#1|6tKOaY}wbMj^!L%l7`a zsjGja@(1Sq<MB6Cf9TUsHlt**R4);1ZCCHfe_A|Ft^?Iy!}Q-?k(5?40D8vv8Q1HJ z{{G<aPXBe$8vF`2dNb#*NXrjTV#Sn)NA)nJKFvvIkvaM%wa5nglBruDPVzustd>n& z!YfnxNKt=5r|(%SP9h_!ekLcYKZLLPQ>r!hBQ|3oRW*Uw_l3Q84f~tZhrq1Au^FgH zV8;{WpZz_Y);HE^cXyGUqd=APhL^<j!;<~6k11Y-$v)p~^O8wc$wcE(rlejx<d19g zz9Q}$%f2sb^fj)Y3p%>J>)#(E7|aq`y*|sVHGYRU<%{?#*)=M6(W{MDb-MAl1bq5e z_If;Ai<wcLS3#<OC#-*<tA8?UdOZ8b(sLSaxl<aMgR3Y7<;8>kl*=PO3+lZAm#gBk zp?1dOkpCFh-?tan!Q#0JyRvI2?;_$-IR^y#gW^AqTURG!fht}O^YG(gc6e=Q)<-`E z;2PItdV2?c3|iv3Xwl!?{c#;_UG~1Y-%CB|dY5n(yp>wBZY#PEXnHHs=div+(!oiV z4;|FJG&Cc2trpYC(8s67*3uidGGzLT07=KZmr+`f2dc8)>Che-b6u3=)urA8YHr0D zz3ys5zGIr{mA5kbU56pp%!atHt@Vz1K`m)3-;_It(A&F49N2;ueVsqB6$zO=6<uK7 z{}P?h>GQ5rj`oHX1^O3rf10%;L8?c~_cXoyf(HqmFY;-fzTMZ~t@rwt?1I^0g~-Q2 zYN5%RJ&SJ}bDmkX-z|-Pz@v-kJ)H3v_TZBJ2P#LpgHjQLocnL)WzxMpjLaO4bhZbV z((&&g2hs_|K!0;G;89Xa-{K@qLyP|K#IUzu=&58Btl++B`8Pb?jGW%cRBPR;t`B|A ztkr=XvMt0~{PYra{jOm?bwmEJpt|Ah6Y2rv$L^>`Y>b*pW?KunaTxptb|u6&23{sY 
zshf!IfE|>q&mxg)Fh=RmHc_nyc-ppfbz@>Rv$p@y@C}k*S>=>M(ppFmNB34Vy9tR* z`<u*xZTnAsNylSt@CiM<&pV=<nDXOA+(&`RPf|Cve#=>J)jAFn^%r#T2+-e)`Z0uL z!0aw{bCTgvH#hW0>41Fzvc&PtZVRbfLNytPh3LrKhevXhROxb^m1C`Aec6q^+QF!j z@AYnZeWuto9O|~%8b4U%sWR;v#v|j*9(CI*0ot->ap<ti$ATm0bpeh2!@9DYsy-<U zmoMFOU%{F_9P76{`oS~%cOqol!WV;ezqJDVv`%%ad`5EXW`CnvEkE{Jy>~0|%YHl= z^m2Xow^Cnq<nTq<l`p6IR@>gK@~^9H?_`px(c3F;ocd;?UI6+&*Z$M;Dp&G-TfUwZ zePX%$2yD)LLYy;&;Ea?t%ykfvtddi`kw)tILIFzh{efrI7T(E~Gv*Pw^EJ|IVNMWi z@4vXA@?6is#y5*^dOf#Xi^-?xbv{M>s*Vf8vLl_2%67yySXs2bqBnmOvWC3SieAzI z^Vgj8iw&5oJ|}916+UBX3(PGkl;o3#!bwOJUBZ#BXzJDIOSsYeNfCBuW73dqv9y@X zu9VkAm5%_j{B!(}zrv#og@WpH?UXL2sw1TTYL4-I3s~fSyz+Mu`#lbpXF=+Gmgg;J zE7tTM!g2l#i%agG<&WMd*OBNeOrUSZm=yq@$hDkn9c_9wb1fm8^k1~`K0(QIZzdgw z5m;j)&iMN8fDBv?IZNPHG9@cd$->nI#2{YBjH-5Ht+WJE)tV|UxR4!+UElZ&$OOV^ z$jjoGoW6`sO2#2V{#Zaw3uEcNSTIY(J*iEQ@jg(=dg@#av!{S8)_*dka5t`c7f0xj z)1mJz7MW-W%ri2RP&x<&W-EgdzuAREvB>r?w4R-!^wV0zis<mpLA`eq4(gAge9sUZ z-?*g^IVl+P5%Yc3AM!1+^8F|=_?67@Te+$9J3!!1k3#y<;m=+U8O+nHe*`JzAH9vw zYp!5Gg9`;vx|N9k0D40{chY-fs1Yo^Q)4Vx+&}wy5)QypS-lf7i|AV9SzP~tg3{+e zs{dp_xV)2+yt$}e5|Es9VC{is+Rg`k-b)Nw`D$F%zic;Bv0Hs1N=q;Ez_N#`dx;5_ zdi0scAZrY6!7W!krAI(on6e1t(?#F$bQlWaTF^vWMLR<qBUY|+s2Y#8Vs~u^A-;;R z$7_jNd?yI>H!S9fwUn;3qPbrTl1-RA13PJ(i9TbKVA<ql6z&ziau;E`9$8-m^fS^> z8q0<ymd*IahB94Z2ckYU9z(GSJazPavebK0$VH2D2x7)=ySg*HTbvn;Qe!eo^~W;4 z8$ho=!}dzlW*oQ`GG20WObaQni`W~((4s$q@@_>{U>FFT|0F`ipESS{vJtR1Q0l&3 zG#?0a<WPF!BgK8$2t4=ra6xwG-Y=*25=5^H%{2ocmG_X|dGd-g?^C>Vt#5(PfvWf+ zWJ&22$QoI+1?1OafxE02{{a$T!Jx!f;79xhsXhTQEG-y65s+q!h#gK<|AN;0xFqYF zL@iiMc18oBGA<X1EFtIOQ9-&F%^yiME@ESAWk8uJdG`{vA|HnI&s0ONgVcQ=#@+X& zK-S}e=P=qzec07M%yhm=ukT9QOb2cL`2t)L#K>?FwSUl|zY5bokmR?K*p=&4=gX9G zONB2L<DT1FQGZe=)f}ob<n7IjLqfQ^vyryAN8Oo_ujO3wVT}I3ENbym8dL|CGQ@o1 z0>9%U*dMW?dv17%wo`UfTFpcZ&ICqu2<ugJER(q6$(20U{=kYE>5oT|{&^5TQHzuy z5xJb+${XR!zk#;+;%o+QH_Xuwt|j#{cR{AEfLu*<W3BYYwhkb4Hrnz=kmGshQO*Al zrSa2H6~F5m$TwNECBc%PhN}EK=q-GVfEBFr5xQCj=))}_Rm3m<5Iov{hNG=Y-u$@6 
zo1l8rj;c}HAY(I-n2f(w$FfR!xAhQq`KrYEBvUu?*%DZ?NCDTmThTX(Uh@egYEC4k z<%cgNF6GM+yDlBEB&wmQNra@GV3!13$X<wMZ!snf`Sit1YQ_B%>GjEGQ#Q~`5bx<? z$gXL|q@AkK53&3cmO-Xt((AyZ{PUaWI2=_@xrsJzfhEvEsqhGXhgNtE16B9}zql#< ziZLqufxs4TAjHK_5w%J;v8qlT3OV8i$aLNnRhQognNte6wghq?*MZf~y^LFS#A*hz zM%0Wzwq^|atjv_V<gWnSrN8mgkKFo<uFnycM_T~2x(;Z6eN_E#7b(5ZDLhgrXJ)Os zd-Gj3b&p%lQfV)E)ID#XOk74a5C~fd#QIbUhqhG0r1I#{JGN?QpeiRa%OjYyJi-_` z4oX6-e<!E3Rest&MxT;(JcZNd+Q`CoPB&+vFX@*qh<s!`c#9}q)(Z14EF;>Pfxadb z^oJ$6l~J&ro5HmE7|o6|VM(6DoE}my!}A!iA2G+S*1r9-2hAs8Xh3%uiN|5Kufv-C z5j$dONH}&ZR`&+pZ&3Gkf8VL@3+66T_jUY$KHGV0MJj5;%cDi<v4kXTG*x4)s9)Nt z?)S=fWR`zH(wfONM<u=LTWqLV^&A+tc+~yvDxlVtHu*Wx=DH#Gbf|S(pYW&$N(SLf z&N*K7K%z#RUecf*%r9=|G(0s`J=n={>mkqUsp_F!pEqOe$27G*Dh~*18mh9Qg6~1M zTEAsLntIrqSFaxKxv^b661<q+esv!8$jcH_=gDBsI+O{KbUf?O;G>u`W72?O>FI>X zaX)i;z1rYCx=wA_n$)Ns^>egWS+~G4^bE+5c>#{jN3^O(ce^{(WBKz^)MKrwjXYrt z^r^=^yhV9DF}g)Pv3WNkwx)OBbSkA3!>u-kXSb=1-S;-BC-vXANP54X(6RSCxn*So z#wTLPvc4G`FQCu*nUDexfDZA3;3gaA@exEQYYR#TTuR3Pkg5S!BawTgiI<1^3FqtL z)(WqB%6)2^dTMWJliK8-2LcB{RQ7xAYE#c)jp}K4qfI@%#q_CXO0Gvib{F~v%Ae~> zI;W0D_zzOhcPp-rtVA~Hy9V{lYtuT_v%&iosb_bLY=>oJmwGOCo=-j3Gao=1Z_}m@ zwBdr>Y4tk*^!<)R>CkKG&A!yGo-aQXLn#}n+IhTDIX{EcbsDRgTMT*nZ(%V%0a@Qx zO5sUMSRFH|7G$MEhAe^{C2dorP527pK}azpSWw9tR&dJ}$o*-oC2#hyjlIlTT=cGx z52zLggp|NiT)qS{i&E(=luGYh3|Sw7Y)FGVCe@9U$}gg~Tz+U=k+mH%2$l-@)o;ah zQhFJ_iq}#h@>p6S&&1V9^j3eq1@cV~>yZ2aw&ov8*vm4vsONjqo7D@!if;A77G5SM z^zR@?9z-;XUnES1t(9f?Ei%lRLJ+;**$KqObOa-`g=Brn-%d5;V9ZM#os=HNWb|PS zc_#TF6N#yBC#7JBQvSj1sCpIk?&c=cPiw=O_fZ<Xy^&pGHN8etGOos93Ehc=@vaw% z9h8DUl1)|Psk)ZiShHW~L`xfn+`oXu2!W+^4nr1RgXZA*@Ok%i(J_EAa?5Yx0t4#V zHROd>v_H(zFMaGO?-DCdt{-U2k<dTC56z}px~CB_Sx<v<3lUgHoA-H2F@K1*bsgZl zl#Qk8CJ!cO!EDSxvy0DQRcR2-#*<yBmp@t%Itv8FEOWqx*_=pQ#TcqF1LKjM9T@+~ zLA8=z<3hYNE~FH>2|ub%gs<vGl-9_K9+nS<edSbx2SY+HbrSZ1W{x;=&l>oJQdEG) z_S)e4kf8{tQLXk#TRlp>uftMeN3%IUjp}?ahMuM){%=Y(x1%q3o`c?Z+JW1FmVjK4 zgiU(OFSoOOJV~&F*;osnOIvXfHljti8k_5<qzAqhvCthT^~xoF$&D%K>%e3*8&!p; 
zN_DOe&7U@7{2{air@$95Iw&2FaX0g_Hw8*xS`4!lmKdL!smKYiRA*z-lS8%0Y^3e= zWMKTjgblpgN$6M|RkQ))#d8|4cCa0#d%6L1F$e;WdvWGgX^YTS6d)~%+@wWiJE#Kp z&=J}Jmgp?1)%T#U`WE=g{{gDV0<gp<a{{Q4ub+&$*a$p`4dWymfL=ho>#!vJEoz25 z)CgI>h=4^pKp*#^S+djE*hRGgeFb)yi(hO9%V#Ladx9xhTniTSV>g=RU|k?Hs-mKY zNKQt=Jdmnvf&95!#C+{Q(=)|;6q)F<c(5_G4jZ#ufqN1xCF5Y$)4(&UiNNmh!m<$( zUu8qz4j&HQfzpCykg7w{NTf#G?;T_ty#blJrL#nADlBE0^qTkMVB|a6+*5JJ*al{I zb0aa>YR3hcp#@J+a-N4D!B7X`JO>i0qQhAYsorjfd1E7DyU|zvF8X5YQ}E~#s@_Bg zN>2ozaUp!+C-9^EIgEQo(&qdX2R%b5xi=@HYKMnXA(1Tls0l+dc+MRt2);r{0uOqz zn-7b#jgUmg6R`X}^t$B>$&wWSuei!V=?dENWFhlhZW7(6(Hz)qr@D=j^CFa1cvEp& z-ou8r1qsuQsIU0J2fl5{>c5F9M-mluH$%$3*vM(zAGNTkH<s9edl?<h--xpzhY9B# z{3sq3K*0%%fpKUG5;xNkd<QL|M`i!WZlEOJkwtDrOTm1!L|-Ku-lLKAK1Xll$8Ine zfu&@<2hFl>=oE<l1dH=Jl!hOnEp!w3VpiIsr{Z~Df)3C9sH!USlRjgy5&j*0B@Z?- z$VY*+W=1Mv!_gexgz=&?Me{l$>6L?*u^ndfMO1k{r5fa}RQc;+32aP8>;@iI%yG2o zWDWe7ferb?7r|$-8$1rp5f6r385j?&L?W<~j_AkObyk8UREo*q+02Tfn=xc;qt|(e zn`*pgzccP>Fjw5%Ot9aEC3=#mf5uLj?uI$Y)}{*QHX`Ar!}kID%H`QFuoe=Ux`Ws+ zp;Ue^AWP*mTs0OF+C`YEWCR17ZD=`^-snD5g^r-A*QRJ8>I>Y!a80Kpcr9Au$D+P? 
zlpT<7kwZok#D7u%@*?_z_crd2ee6;sH0H>vF3E^3^uE?fV*UVgNREyE<!!LY^LcP% zH?fx+G(!&W-XA@*rJ*4BFX9q1uBNw&UgvmH#r-n)0`fDm$giM}o`%zbR8Z-S)EP>q zP1fR)!U*bbLX{!uUHqM!7~BoWz??4Jk{KWUm{Kqs5_l56NIUof8zJGB92n|IWz1(n z+!5T0H((<;!A_iCZy<Ezuv^_qwdP5B15Y+%QoQt^)dZ2#fAJ763Lf{-@d)ZGe}K4V zw$XM964k9p=*i?uPz}1M=IyXEM$!=yR|6ZVhGLnJ0s@i$Q#)dJ0WGqt0~hW}0a_G% z#%xNx`NyGB1%5?=xvQIi32@&FOu7R3!rqt}!q6L}Xy95T0$aRvETtp=A~47I1km?8 zQ49GIGk@$NVEbr`o(xNHs-M#3po%WXMsy7@^hO`Nq?779^ah_n%>O&;<5g5+<teZX z=^$2@!Qz&eTzcvA?SmyadMkkD;!)LY=qvKth0jZbIuLWE6DH$b;FkP|=Q`_3^3hVX z+6M3}#Ekc_=6>5loNFPm>1Zjo5{=3w$hx0wK<p=oyS@v3ui1#Md{Gws&><w9-h(jd zmb5h*ftx44&J4@T4bM!fzLi*WT>%LW>n4(N6!&ikAl4!1?`<YbS0NkvSy%>ADo$^r z^d<^ineAw9MtxMyj`3+-$R3YuaV;!`mte>_HWkd*!xEei+`^|Q`Dgjb*lzUI+=iI@ z7?f5`LL#(*-U<)#^T<&=(gvS<Fdfd<I#BQ#q}rFvkjV~aG_rCP$N{bBW~$*E!J;=q zXAPyGH3gPq9Wb}qG4yjIFyu@V+Cy8&FU;3<!B<C{P8#<f+VsX3`Jo$$YG63485yIQ zAUe-Tr6fDC&uGBGz36klj{@f%!ndOX7Y+qW_-YS^PNLUyKW&D*Hwd2^6nrTXA=;|z zF;w%Nk1-$L33(6&#$u|TT$nw%A~v%Q^>SW|OvGC7CO`%qR0EG;L;qP^pZuwtuw^O4 zLEh`<T4OF^<*$OO$nC*FIUE=7M1AlLJLEWsDF+A7WytFNytEOMp@opx30~Uts*=@= z*bZb}hhs9d6lm3Ja9<xk3J*?$xdIr0L$K@ool@Qc6ohYWhx`NM(VvK#KAF4MHDc|M zWSF0#RCNSe%11kJ--$<lIl1Y>m|ok<Z|Xqosd^l20A$4^59Dh~zDrOa+2tjDrl3Al zgslDpuhp`Z2b>@^7P+yzp5D@9C>bZxRyY|k^BmBZJch~OAbNuVv;^(+1~wq;GMdr1 zbrBM>pPTck7UeNoC6B<Y?*&3{fhxG0j({7_%MU<R#g8^va$w1mPxZVK-KZR+^iilv zI$|M!S6@PhKGFK$>tw>pUv#aSNXb2H8=*S`7~%B{MWx&Wd*$#Hy#dVL7OMKd;u?UK zko?URvl+3-(iE9i?S!NgYtb)+&qbSgRU<K74YO+p&Xlw`z+6Q~<#QggZWYX~%fRfu zglgqu5aT=(1YH0MZEIkRxbsk@Kcla#pC1IVcC5+cLurI)#7^wS>Cw0!ldA*c29~to zp`glJMZ#4{U~4uZk$*7?j3>OL&$lRb{|XlORL}lk=bdgxeGeq`5gooIKD2xab7>d_ z#Rp?IuK*WfE}o~OU)gYLCrYd3q~Q4#W|!RaN9QM_FQo^~Ly#yQD*DcBhFOj{RR_Qv z-qa<o!WZG5OYNPDmwm)IIf)o7Myz++StslBBHH{XG*X%gbN(JQ2W3Ig8*Xqu)mQ@c zW-Bbt5H^e*4oYRj#Weve{zk0n6K#ASVnu$KD}I1EbRVVCRU%dai?bpfzTs`)+hqgk z{ot!!OK(-88Iy<Le!(6P7`d%P;|F>RWuFd?Cre!Y=+#$E(G=SJT^(q-9V~fAY(cXF zLp5^<Y~GO`w8-O#UWZ~qG@A=dnAbsq=~&aTDjUzx2DV@$urmZwd1CRrScmb~kc}(= 
zZmDE(;95$q3{XYnhRU-MlhyT*@}IF58|9}u$Oi&>7${5Qi7NUvrPw_%SJz^r@Co2n zUQ1i>Edo~lvklMPkn&$>3oLF0^RILmr%?4wq*PJS0bE)CjPu(ehXBYt5<~g(F;wzU zJs}QIEsbHLbSh%{jO2a>F=Gn~^x;lFOoht=0NN{><q@UgQDKo&c;RG9HLFpjx3Opg zrC1|vE=w{jVNeA<9_)VI47t#OgLkIV=IcWJOs?NO>+mfuTTCo0N+omjWntAfNQ7pQ z7J18M>P~D#Hf(3e-lt@~jSHcjXpXcZTNJdR^q<JiNJoo#N+)7JAr{<4DO8WDz~i`3 zoGDr&FuMz3_RsbbhzDqM4@E3=VG6TWzEXA#s3X`<p+4xKBfO~r=G*8E-31_T0kXa6 z{SKO|@5YAfa*2?fRD&Y{6g>~k)!Wm=Mi*F~?*@TPFyjhJ<^xTHZa(U(<uNGubP6O> z$h#2Zq%HtSW}Cm%Ltc>TBuqMgh3EkdIWSp$2dZ5CQDx3=NBuQ+lHn2Tnr9}1^y^02 zUP6KY9&A+KMs-HAozar#WdE%w&^uj8CctB7`7u5M=HQ5B$QDyA+>|1~z1Wark6ykC zPeeAfh8V>DEhBRvr<=Tk&>Rr;(H~)s%cDX8H?BM*LW<Mq&<8o!bV{*Xm?S|tvg>Uz zAWKWhOYj+sh`9S1FTJ~bsFK@?q8n`}m;+1oS9T(P4*2rVL5u$zY?R3F>4NV!Q~kCL zlk<?R+}42&nXCF~qGUI|=|e)`E=qwh^tv8MW`bSnMm8BSy=hk8&Onunvoj6qF<x*F z#Jv-{@jHZ1Qa-YbUPHeAjLm66%K=?1H-`to_o*LL^HOM&%LeyFi22WkxL@)@WOoVP zOtqi`%<<d6T)71W`a&)Epl$!6>Y9b2@CE=?Uy7kn8>@HN4WGHsgA3KfIeej1Pw_#{ z=mtv|kMtg@2Tvcj3T}frD%l=B0OQdO5V`2)&s_jA_yL)19Ezbl*?P-YqN?b(c1SE0 z%r7uq0`q`tj6@=ujYq}2=Hg9jCw|OEvvDY66nYY9=27@jGQmN$!=auiy0VG;=K)yL zpVb;q!B^nHMtB42L+4;T<OQ?00QGt$j2{ED`8|te{EDS?T!E_C?Jx&xVb0?{BCDQ{ z9NJAscrsO;AcgWYTWqrt#KkaIE=He!Z<oZS6PBk?S}o58m5(;k_A?QRbkN~_7QXnE zbcCk}ncV<$j8fHesB*p5j5Fs;Vs3!1;2vyvo+l)}iz$W2pguGoEx{>n;xdA2R6h8V zcLXYshpLL38j*Oyhf?`SJ0woWFG~i&F0pYK>Z|8@Q1BgqoCjgDSRW?z%5{q$eVM2) zzYmkapqFY7F#Nl*Rv`DvHO)jQaBmaj0c^N`K|&8yNbVU*CtxTz6wM(?QC|lv{*!4d z{RJ(B{je6RLpF9ZRp0aQ#bqbaS688bBH_QG0ezpOfWD@Ssc{Ew)$?d8`34E&0P*PT zZpb~{tw$XgFU^CL-Ik7B*`3W)V3@6t&{`G+=fPMDy@g%9kw(u&m41t2?sO9?AHC6; zSPS3nWq5X@IeHvv<~$g?p*Z@&_aj>|98!D$NIgx>_>qZ8x10ox3q17753u!(Txf1P z=oc>{R!)=#S9YM~>aCF5xN8X|*hoV=QJ|0Zdg;?gwUP};lnlYR??WWaNe&cTgl1Ew zY3VH53d~IOB~jJec2t4DOo-UQ^p?MlT|H1WMk;OVFs?7?{Cn{-w35u$Cw+6BovQ4d z{uzk5U)h4v?*sUjfr9d9#X)(-@tgrEigcm$e0oE*Ot8YosphY5q$JNIt`8{{nc}Zp zy%=YE8Px_@yptPHa4H><XXq#%hcm|GO~9Q6(U(a@LHMd<ONd-culIX8%H{u9GKM69 zR90a3uQmv;qI$Q=a{0o*ybuy96OTF=?gCkwBb(q0os<Hf>^OmIq$C*|ycGn&yXXxj 
zFcjQ@$>3Ykt8XSwh#_`5w=??nfZltmq8=Rlz8M6AyHS5TC37MY?w&3><d_mzMn}+3 zH828bp%O}N`RPLB`cBBrw7IhUME7Q#$rnKSq+l#UOyBXuVn|d@Ng=7_gdCg&NaGWT z-hjdz8VH0u9UF@%>21d=2daYGP+wI_hkrHB#2!OJkEL-eeBsfU%$M)TqI)}Fk;8HH zq-L^M{$QeK78BNU1$^c^01DMZsxC%L{=-NVji)2fhWgN*_z`nD{(P`5co9RfWw;eu zj7NbLh((@3Ouq;!ksG<lQmXn2&UqmCVkdybmyd5X(NuEx7^*>e<PHo%i~BMgrOk+$ zv*^{y<#{dze8Z85eT=Hg?Xc*#T19?JrTJcrdopO7L8<(;4)k5;A%0&qqu_NrVo^GJ z<1DYH0~z&bc?{2kvaW?pAH7dD62vu>;zuK^?;c7ILbmiLFq;<w%{-Tmz+z;B|Lml+ z6hML1X^>wW^vV-+$skI3`GhItp~INhO2-B5=&K@aBe&3DK8RRpLlf5SplZl03wTqA z^G7z2HU&ZOA)ZIRh3H%2s&C<&kpxSHTm0CEf*Hd*QCf+$ScMO}*Egf@JmC6o6p(tf z!nBo5b2GsTlfnE6eBtLHk?*llVPd!DtHs#3n6|tp9B4idKL4YTSZyPKR@4h2H%flR z1>-~jh2Cg`$T_y?cVHMcO8Wgwi99L<<fr?gVSo(Xij6Yf>#E?~20XeB1@4{F`wi95 zHdyo#F|rUt#baqR=Gw6<KU*&;Ndwh+=+h4uy|dzGs`^;wz8{GSIZ%b=Z+gmWW96KI z?2Hi|xbPl+6tqi(<YG%N00DzueQE4}&$E9SHs1nT?^SG`4b3B{hL27`-$Jy6D&$ak zo-6=QP%@fv+We*wd|hZRPD6|P<z~dLSuE=n(4u$H8y<=l=XpLPc3|Axhg&-NBeKs# z52mf^yCzb38_arDu9QId8X<WbcA&l!XY{$#w}Q6H&r?yb%7L{))>A_kEqy0l{EZ)v zF2`DEHxj{lpfa9-#9y}4A+Ja(d)iU;0l?$$!x!I;m)ehl-6?>SQ&r>|AE>sYG;lH= z>EnLMI5q-#_ul)SDf+c|o{Ca^jMo7R$a8#fTmUVVXmM8JrLhs>k~qiC23qvYMyg|2 zfg^H#67AQH#9g$N$c2GB7v|8n*yw#@cNba;laLM0?_k`Lfm>k1ZtvTh1k4`U#%7G9 zE&g6A9kM=0SKx=XTd>wcNzx~@5LEi0S^l~gEiPF6n;|8e>cR2=h9VDu&q!`Y%Nm&V z)1qD(3gy#heWe$Y*Rs{Cv8x|d^~VK;<FMiV9t1iN!NFZvll%)T5v9AZ7Q2IF(64{= z+1CF!V_tBFpt=AA)eCH(e+IGm(d{6pS%lNqbz{=)V)W(dJ<x)dfE>%*dT-oBwMZ@{ z^x-Ld1rqvYSW%d^@FfE1^bXpdZzgo<5OW<Qe|Z<`W62n5Bx%A|f;sRTF#PY;<8Q19 zq!EBeKLtjm94~cV`QK!;Dpxj=qIr<$TWHa5(j%E@nPD;iGjT3%ptKE;z3bmn(l*e# z1*K>DB@i8y=5pdT<|e_n72}mFZIFBE2tG-2g;GR)p*SO17e5$7HL}|q^5@M%9bTOF zL-b3oV7b7Ug-O4-75cy8&O1t~Dr@xjR(Dl(qG=jvkSG`t6N8E(>a85QsybJ7ZgfUW z7{=^i0y>e=0dp9Y?=y`f>gbFZrfHfE&<1ow5FK^U5i^L2+4tMKu#2gu^VVDIt?&J_ z*E#p>boSZzoOAa*VMQ45Bv#YNj;sM+n_PM&3x<-LizHmMcaKviOj!hzJb5X(UP`_% z(-vm2M4ojclL7nehmZwaHrdEwWD1ad4yoO7srBRO`<ZrOrk~rRquX<IHLmVbOA1;5 zvb81%5NW{x#?cLGdiI#d>coDsOD!cWwNO2+3d%rF7PL%Q>DqX$uO%OB{;x4z*}WpD 
zg346aVa}#c@B=-sFLsNQl-RH4)*c$_6<Q_`YsX_{rMqVg6ipG3Yosq}`ZPczd!UFl zQIM>M9IdCf1tpCwYgcu>s2+G#!m2FRh5Im~2shtaw{HWuWOY$;mGUW~_;^aw7tj6N zGuhxG+uOVQ<bd_Gb}Nz*$G8_1Lr!H)vz0bk+PZ)1fcZwRZeX`v?_nj;3Fw?gmQqcP z_=}~6om#5xGiJle-o~bg1+%;?uByTYqc6)qU0Lu~+ws49Vzmzn-CLE^PxcV0T)jX^ z?V>9Nb*HT*+inWKUH6|f6+!B@sxx~CK$Zqji>Ldp98Z>X8nLb((5G2B*f?no&ob(f zVqld+k!JgJy9i3!jx^mBm0J0zswG`uprv*9yiNKheVO!KFk}OSDx~$$7gqNfsyTJf zkGV68C2p^yt}xwn^A+7JFxwwH%O+bRa|l^I_%_$Tv$Y<MAvn5Y2ydQaynqA1a~v$G zNs^Lng60&+RKM)I@T;-sWYR|GWnlXyyaol2pk&4j3PxW?K|vwXw8=(o^TQz5OIF_Q zvJwjPNP~i*2+u1;Q}dfH<E5Y<F5}JwMYlKUwwsLoa25MEeK@cLL-dSpt>DkPp6-j$ zzC;CC>(Ea+54BZVdw7ngG;Kq23MJJ`rMz7`qg0z`NE|)=4HbV$QK+VBXCw2cCR6*R z1Zmdsgom|$eZhTbx2#RAnH_M3q)&<~r>eh?zwA#{RiVmTv1DWmO=dj^=fHNFwqjj? zHy}<6dYWk?Jxbp@n*#6b1p@fGtC>kl>&@U+?tJhha_o`0isx=-zLw0^n_hsT{xu3B zJ$%R4zYIlGcBSan_~4<M5;B=$dp44dHZl0?>3d!|&m(vU46I#IQkUW{dX9hTus(*i zGuogyRP>YCVbukUZx!;bRyLyxF2mth`v{=o3EZdTbezGe$);fK0#aw8T3J7?;u%fd ztU2iH?GMp4pM%J{A4TNvVBaA8^h$2e&0g%X4&<2<KDG?ucOdL7Sq5SCB4h8p*T`_4 zG#C(<NzujV?E0txu#=Q~I85CK_6T6`cOQXB_iqsHKHDiE3ve;dJQturodaa+j~P7s z%8xEVbZM;2AvJgb&t&&XbuF}&92I1gWr6H@e<72Y$Wls=l!CW-=I?XB<zNBtZ$yn> zp>D%(22qbOlqO^<p4be(#O7t#Aee{MynPs=6fWMg;8%43tjZtrW69%ao{FXjoTvAK zH>L-aQ|(x&qzk;qn7X(h!Y-8~;Ou@0LGB%FyK(P=v-@BKx%Xs&yal@iw>(ewI|S*C z&{Yz<@$|yp(~Dg^yM(hR3`pQF-qVogy$Pnl1d2EIppf?k6!OY+rpgN93?bP2Lg*aW z=ENEottWdqt>wz6W&AZzSZBf|a06D0#t<Hq=~1~vIy_M*y#D}MuX0E{L$-1?rIvh* zdTyj3?1R1g4Y)M*<skCW5C!rV)YHERg(ON>@i@=Qo^v4WRTQtx1<&ilR`tCo9*8Ui zuZRX{EaGgXngXFX)Q-sNZulJpD{sCZ85=jCUmwr(YT}X{Y^C=T*G}xCMn6K~&&&c0 zMq2U);%Pmkru715kP7O5ji;!BtHg)g02&0UK~CNdETIxlE!n`5JitR#Kk#%rK)V0n z0W@RyNLcAjz<TSiUhW%~*hBsDY_ykaG)fB)<XVi3;@1P1X(wcQH*dTMk?~GU5_%1@ z`yPd9@!=REdByD_D8dhwBWM*B)QfH7lcXsgAgdSK77r_d)fbS3tAhCCiO?oa<kwO0 zS2S(#8M47=P~6vx234JOK}~-Z8Q-+fzuXmu)}X#tP86^Srq(9#tWEu>dnyIqh0w;v z7xQj)p`WGSA5*AS^<ix&`DaqT#y%RLo@`Y9&P5wTfYUtG=#mY*H~}Yuc2L2F(-zW< zw`am-c`KzqEI^^9WJ~@FJiSsimROIp1T7e=%ja1&*~)7iG`yJ8n*)Oq3AhATqf1o& 
zcL%S+x_UFIzq<=NH=#jk6P2m|PO2qA-g*-<S2row>sY8%zDZni5{&mo2eh(TT`vr+ z_)R<d{Q+HkHgxvcFq3~7`W2s!ppqLXExoyy)U~9dcjb@`qo4O?QRAr!@TB^B{ii#G zNn$sFT(+5-M{g$;yxL3Z=~gO}i!OCH3d%Ng2}mzhd_T0V9iSxd?DQkUFRycngp~Ep z&f<#^S-+TU__<!5-6sXB;a4JG>ZLQNZ^?_L$avLG&39*^n)IU9Zh-e3%vpLoo>O`} z`~5201_KsCRzI9=JoR&A-^o^(Hca!O&<;}Wj20Ye6|HY=r9s`Yv^^-lT&l0hCcBwb z`61{WSWC*<1G3(U>5&mT<fJJY52h8G_Ru%Ec1Y<KkloKZNj(st<{7O5Fis>}M4m;q z>Rd|WB?t;h<h@Zof*ZDMMAO1<xX9S!dmq*GGRkll^$mAX-^hU|6p?Lx(F<{g*kowy z?3k`@4D3t)N~&}X3`$>tL4|zv7QYIY(C&q>dZnH=axDgU+bNacqe~VAd#Au9=CYH$ z35u9K$m@48`su|l)>F{NuZApe(*kOA2f}sZS=ITFg-bV}Y14XW^FVR!hcwqU0QaTn z+;lC^+z<Bw9)+JTqaSILF_>>QOg-;{;%}nT0Tb=@j>For01RYst?I&@HRG?7EE|M; z>v&Uq>u6P<%oHlykx|+H68=1!ROD^g``dD0dKFo{tgv<%s=0G|C|!rIS#R^2)=LC3 zaT4sKp8>4A7`8fY5TG8zTAi4rY#4Y685RS#kxi_CU+r$H;F(1&gEe?lq#{6ez8wvQ zg=x3jK-OykYnJ8VDtCma`5G))e3)c6QW}4v9q@8Dc(=Bq#)?d;^$vnc?}tH9E@^6P zNBhKIeN^xS*z2`>B{w$X=r>}V#t>@g20HI|sG+yvg__W+em5wd5k1(d8rqs_Xyb>Y znp^(3N8W@?udCDlQPv-k7T%0%!T2%?UR!`6V)#|##vtlu!>_oo4XqlnefYO1*_04X zKSAfFeh2!!AnGn56&-`1^6D%=trIX?vU4ddpOXu}^=RO_6ycs8)CkCVRo+HKdV6Tb zutd~dv$9cg6{%1iT-+aEt11b_dcSJNJ71@ztMYh%tA@OT+<G|I%qj-&Rj+^$*0>8~ zpZsJ9jiEGl1+2WYn(>2!5UEq*)KMNft=;DXRi^eOFJg%FDZ4~=^t&hZb@y|u^fOW! 
zq^9R-p9RU2Oa>;~$}bHiIoTFn($r85^oon*%zWU}GVP;Kw}4xRxzSl-0Lkxw5jL_D z=3)EeyWlJ#R=KHouZ)3Ny`Dm$FI{rqqC{fcRc_;r4)%Wi!*f4|aE|NV3-oJIPyf-A zrHwPNxUC02826{e#+#kXXw~eG`;E75dj`+Rnnu=fKXP+iw8!ATd5IUa<DGf3d?4q0 zT3SzxHnz3$tu~M5))Urj(?|_z-N2Wo^o}_!g?l>|pnVG`W*cueXYv}1$iM{*+2o!3 zDactna@ef&!SlvH^Cm51)77jfT5&|1ZRri5=sD5)mFk#4J@nX#)Pc`K9&JIc7eRJ; zyY+DA<hjmQ;atucD4umBC}Yd;#NlT+jemBYj=Fi5FF{aWud%)OBfs&^qK|?c!r?>s z=yQA7P^RZF?_3P+aczK@fQJJZM`r8B95*$;i>!GJD7o_eJL*;Jl0TJac@xPd4;Ze> z0$Gj_Hl!K#+_${`T##p?!I(EOY>@|kdS<X52&`x5>WBNNzhzVNDd>`aq?qK5cH`Z; z+n~*?q?Q?1Ib<gsslo3##X`s}kRO```?Uk6^Z>#kfc&IR9(UVJYmImfGCeqC`1eqZ zJOvxr)@B?3DxHOw*e7A8;b)>+!N-&iDPhZ}9xS{6kzq_C$H|(PcN+g%zc|-;FBIN@ zZ~RDs;{>=EhhmcaPH6QslKkN)o+sPB^h{aj1=$#O6wjP@<iq7w3Wm1@QK-*pytnZ~ z+Gx<H(jGTu;U!tpT0^sp9U+;L*zfK&cFg@}4l*L0#{0$Zw}Wylf;8?}a5P4=@k90f z>sLc75rPkXzY{abNv&_mxf=K%?<HeCK&~$u*@&6OUe=2SPqgvuzJ)MwN=gp3G~_O! z{r*Dr3mSqbbPm!6oh#scfKv)`9$jZ_?}GC?u_eOU28I1eQAkp7*qar+ce~F=gHcma z(vTi5^BzEvS9HFg0>@EZ90>AM8)xU82WL6U%otzY53-zrXv*3YU49{_Y$ngw6O<1c z1kbi?9iH<!C^k{Ue4_`lxdG17`^$PbOHey|o0DhKp=91oHfs(>)K6~hU-UGbREWzR z#OxC0GxRDJy){ZBXeKf~PWDLBeK{^d7h@=?WUptq$Z8x;`#<TWQk|gfwG8ki>Z0MS zRG=cQSJv=JvZEd7@RAWejooPHI}pH8ty0mqk!z&pQp=&wVqHUGEskAuUxQBTrWx`3 z<Z*FwFyN=IBW#SdA7DT0JetgZt)G$U%7F1{$1!eWXEf5wB;g>uGdLv+loMeZ`4>*u zxSr;5oeaOo8@RH!tr^-XvUM4x!k1x^fCXOS0{FRq?SsKQbn&>`7=+uPjjqNn@h=?= za=(>=+3P`;-|p25(B4z*gY0t{R4gP_sXrLEI#I2X*Tr)TBIAcHKympZ@a3U><5hHw zQ8_53Y8y<W^AO}JfM4y0Xm6b@x;ze<CliWzlmcIS3q$X#JoA_FeuVn{6#TXo<lj++ z$f<Dgv{32Bhj>>0Og9Bbi47{z&x#}5YJ@>@;PXJ?mG42%C^%PsNvUTX>Uze(+1-en zxo>STc6J0inN|6Ej9tYiw^LdGO6}$Ap{)z=ZL*Mjnuq=_6vd*8g}=DYrYmsE@#gLm zpMz`@GNMQNF!2omY*5n&mjlr+$R=f@+Q1sFS3DR(J}$~005lwoAlECTyt0O`Y!2DT z-vNzJ@XR+J?S13hU~owmW;)vsa`s@dUk5PBE(dZSLz+8|Bl%=vT|0C>Rtr$zzY4j2 z`3DsK9F*Wis9XC~JAP1(R{k4l6#s27^~*cx-@4w|_1v%Up+F~Ucus(Q;A}hK6qp7Y z{D2n8-U3<RX|xJXhAeo56O&wnw9tGXf~JxUpA2nyb0*+hY!#V|$i{0IAY%|H5g%H4 zWWWv1h0H6TH-XYY6l8)@b5;u`c>xCT!M%9ckLXwFgsHU*4cz4v_%FwP@lG_TxeDa^ 
zJf79xolU9CJH7IQF}TzV-f!0fZl_W7I*`~ItyuR)K)eC1>L+)j)h-{xE5Hj>^f24( z!A0w4hf#c$gUQ{86ja>{Mbm9<JljYryc?9rKSIW5)?-d4t|RRNKrWturoLmzM)G|C zh6E!rJjeK~BdgQcZLPq$63ZzKZl`(5rGK`jQ;*09>Jd7kSh7K~w*-yduS~`ek#j?U z%Mck^A*^nof{}aufcw@XXjeC>$9Pt=9yR>4z>Du6K-~+;)=h3UKCfCyY0LxDXmKwU z97{n-26&AxQsBRqY*RgkXxd3>BtW*zPHFH8o&{cN$M&bRP!I)_Ue-qGD9Fmfw0^}& zP*g1JgUdY>MBl{uqwfmXu@LZK9&#TACH7|lPjms+gaA)306e<^uvW5NJ?L@)$kyw~ z4TL}r&h18_TtLm4aIS6TS>m%rRBJrQ{)=G{m<|Z-4B$hDW4b^Wsj@YGxO@wjguH&P zHIVrq%oO0j8t3GKcQ_)0E5UQ0nhou!Zl1Z@AzRH`5sy(z*9o9l6046Lj?NJY?KV9^ z*8e%F@>OWi_&5y83|h;-8M%SwWJC9L!_Px1x*FA@_ff&(qagGCD+-q)2V7PI8aIIA z+O{5Z(uEsddBH-zqQ}(Pbx8)o>%T^|+D{P_ls~dAS>sjtvfcQ+qqc;`YiI*B1p%j_ zU&)o-#uvUTY@{B8y?z$x0XK;+o<~WUDa4}Pa5+B*yJYp@G#{c>br9nOFQXtjH4DSK z0U_DHP<0ih9&r`x=m>S2hBSYx=-da2zdC4q(XnM7S^2#Z7k8|mv4HHCXli}bZhYw$ zv%9xp?D{7ut-N9hmA*8HpdW(9mmS0CT3mCRjXin~R(YV?*wax2zp?`{n0F-#S?`FT z`R!QkUD2<tmC{Smpu7h|cp108@?ApB<C`e0J+jUC>b2LMAb*cSo<c-MW%60|pcn0B zGVA{h4N!JC1!YH~M%mGQ0Qqqf8Inn*E9{7r(^jn$;1_?9tlI-h{L5Z64TD$vafsIX zu|<YOw2B{&Ny_KLIc$h(*Z0u0`+?H%8o<>rN}g^vzP4gitMcJ43~?;9Rdc99)yu-= zMbwSI1YW2b4eEQ~5?stP_fj<Q9%DzB8(?o;0J8Nbq`B_RrZPuREq6^o04?MGwFBg5 z7to+n!1Emw1n;(WC?p!h=K#W4J;v8vg;>1w9k?VcC~9XQBkF3!E(w5-Il2+}zz&5( zw&S~`pOk2Y^|lXlRzg<#UN5OqvMvdHxaUJwlkLO?V|i93qf6~t7&MNj1*3oU<6N&o z7P*dR(Pv;9-R_l_iGo-!6qNytUG~>L-j8>vk?T1WFDVj`pcg1P70xARN_JZoVCQ-? 
zcsd7)VIkui%cL^prD))K64SZF!`v4F%BLXG6Gxh_A%w_#ppD+c06B9q$R!^l(mxhf z{;{BxhlFf81+^cMbzP07o|Vm%R(4Y$Tepg91h2E*_-1niR&k~VM*L1EH6PLk%0y6n z2L+99%R-{0gi1;u4#H&;Dfj3Fc*Mg#<J)!e$8P3i<d!T!M)A^KP<A?v@AT*)SJ+3v z3e*VP<ri0J$DH+Wsd|}g^&|)2DOlCcAmu)xm`0g}8o`-l6Qe;+$mGsfM%J|q+m}3m zIek}v;{OBMhhnY9cO6r4LQfUhrUTcLYV`_0Zq);RDzl~y+E1WxJw-w2ye#1dUNjz{ z^kE7b$J?Qpm?;3Oh0aGmzpWM5YeueTu*3MitQK8reho$A4&0}yWih6k268!F>GxZ{ zu<=X=Yil`uY}r%rs~;4mzJEdk?_<a)zpqQcMuwC3`vIQ`_<|#uDks9PGD0&}mJn{M ztfc>|tS$u96G^Q+l^476j6s02mjW(`0j`9$QbK=~&G4(diJrCcE}{5i9^ffrv{i4w zs_KIXU>CGipOcC=5$lUzL^gg8Oyd$6i$B;8=#bLq@&K=pN(@GBV(fCj1Obzu155!g zQA0x|Zh=AKc0$yNw}t?A45bBa=v?i9LAA`+s`H?zE_?w{1cT}c=vO@##j6)d_6~$s z-}^k^L8MjxO&Dxi3fNi-c!N+~wVdQ${Vp=9cP;_!7FK%%<p)q|Y$3n_F9Ie}P;+z% z;25f3b8<eQ65%zqg8&T(sySP-a?fbZZ-w>}JgjC8a%--pMm5a>+R&x8meSg5;ZobY z2+%Ii7RLZLNQ%7_Kh)mAu}ZZM!L;@X;%2oHFs*%$1DomwEdh)e44AMCP$j8q$m$wF zuDcvF)!mDrI@!8a_Yh51w|X()X~Fv&4OO>M7`!2++Xeg!&UGJ1>Bn%X&&dM}=2`vF zT)=+N)=z|U{Xt6slW@ZN<6Z{1<XKQaECM(qA8<|y;Cx|l;UK^zlD$H*a|PwLe!!gq z?iaE@UQY)ve$_BvKr8;zu<!-I(q(|_X^)1x5!vwHg8}#V8$Z0UTC#GuTEky3PQ&ZM z;HwDW#}eKY`*VP;l6oT#@D>_0Zbwk#o*1u71~Qrsp`huo0>C+&02fffri)?FbeW(m zMT4g6p9kCkS<|buOw*f4YuZ5rxNqRKS76Cnob=Q%x+ULdyogPU5VQ}n6lk{iXx_`f z5rBP4pQTg_D&@m2A&RF;Qz_+~%BE6!2&K}do@pv2NK?vLRnt%I)OUO;lA4Nq8V6%U z+ud|Q&O7PD^0)WW(;P-vqeDKg^-|p8ADVfV{QRe<(cgQn6W1|DT}-y@86TyoZ+7|e zckf3Bko+mItbgwp<UUc(fsm*!f=LU-YHTy_;Gk(6y8n727-1)o4i~+nh9SqaXA(w} zc>7=tN*soZ8t2U;@DbQxW;RIgt#}%ww9C&^LmD~fbl!!bPvZi{<W@8D`GDQD#{x7! 
z_EtFS!Myf|HV`yf#k;CkL+I+5RLC*ydN~a_W<TEsg@ewV;G^l#<6oUe_3RBanvWKk zj%yvXxnBJ?s%km$uvrn)S$8GQ>LkQvIPXWHtTj$jy?BIPXO??F7D^sNjr7(s`*QS4 z#u$$PMQ$xKbjgi#B4nv+C5D`YQ)h8GkKyQffz+N5LCWs|BNpM=BU+ci<)R2cW04QQ zCGWd7RFmI!gN7CZMq!OX;{;3;a1csrXj1PY(a@xxL7cxCopXQHjn$SyHtaS)foz1< zTd|XI#|J6RsAhOd#U1l!wu2%u#sUU#BUffiMH%1~`k~F~S@=^Faf%q{Xpr~0g=!=< zmpcHA!l9ztA-QID)e=}4^0%+B2#+Y7Dyd^zIbF<zsV+^_r9=D6YNf)zfn2y2xe_8Y z29LF4A^EYSOW|xQ)|vL+cTqQAgL|^-FZ1eVN+lNLoZUsr#xrA<fYaxZTM016k<FOV zNlMOGHHygEO5A`o09}q{d`!v|Z#TzKs#oypJp;yNon$9cnsE;x887xo$`By&F!P=a zQu(Be;XF&O?2;o$4LKv!FaYKtn`j`<WPNAOr*5kCHYwXwQiep(j0v3p`QV@}0RJFs z${$K5Avn_AfXpXF(8PHZ5a~@ubqVW|dfN>!DM&5%<Xk_~*)gos%&MLuWFN0Hv&d=+ zAlHfJe@W9?y&k|}HfBZxi<kDs`sOohk(FJ^E@&Xar&nv_&BQTvIpAoCuMIzt2F>rG zDa?`RJmezUt!S<^UU?X4+mMm=S0?=k`T_PSHGX!TmP9mDeOPK`VRDt34-L|~Qcp9& zRa)4&jy`b*AR<+@rey#mAeoA@CZnNqJq%voQ!W3Ulo|(-^}e4$c4#lN(kr~oullK0 zV1<JhdvF)XZ(&LIAsY~M81CIT6EAMOiLC09SdM2`O~_{Ay0V24wwY25iE33Vo@;}j zhl2R^ec112x(AK8)^Eb5u~ld=<jMKG6~8At_EbcgC&Iw>n{G-al+*aAlS%oA9&@l( zrs^a3`A4F2UIbmr<a16d7`>DxYx><{CcS6zEaHGwr5je(oq&KHE`cmi%H>~v@Dxz2 z5Uj#C0wTxeFb>KBi})WXXc|Oz(BmjH=*1q?_%9!`py}A&V@InoQ{n8J4q5CDCj;p? 
zm|Cmt)VUuDn;j#@olw|j`mxJ5>yh@I&`M;-w+*ZLu7;w{3#&}=KV21(5Mwu92u0$n zR`|&<R`)y<8UqO52z&KY*X49e`VXymJX}Iqv{p#My^#^{^Jf8qO^~^tY$Fv#4Q+GZ zcL6k*jKRtu0o1HS4L#7d>_-@sU6_d)Z$nYKJxDgM53;wiNfq&I$WBt06V--HhrNDs zyXEIgnf#k5yF~CVqaeDZg}PnYiyvfBs&^SwJSG$);F8Egm+a#xb;);B>au}x1q=8w z@<|R=5Ou?+plL|Hn4{$mL@uyn=NM*+#lQ<)(+~SEBWOAqO@m+b!1)~181Wsc8V6iz z#%{#y6Djp)Bh8!LiXU7A=f-c)$~}yfF2Ib*7@)`I*Ni|~`9ahpaWAQ1`S6QexE_jA z5m_l;ZIvT==Gj7{G_3bidI_LhzJSV~f-F=6@QO1Oe$mA<(aL(0XYNUG8F>}74P8Ef zyav^m_fh%?J(A}_Y;TRF)6&D8>sNqWUWCrk4v=HrQt56eN*>Pu<sP~;_tnx+`I41> zyyWp_DB|E*PB)$?>)TzEVOqA7XEnD9`wJj*wUBj97d$zgSI^`PB|>DI;F9<=OkJ;m z=bIxGPO`z%0G3Iq_i2Fp5ojxZok78k6ojoVgvY%I+K+7c9uF3}jMC`iWGn2=07o~a z7k2LL3TzuGuxVdh;YQlP#49!bGtagoQhMsN9?p^;F4d8RYGIZ7%aiOyTjwLKO61O7 zM?L-<P?I35H`}-lgn>@RlGipYo^KXzm=VOff*f5$X|N8Uqk$`g?Z-4{pya@Sp~6#| zR`i2x9SRsbhVCY<=U3$9|D*Bi=yQg9LB4%H?5Ci(s!{trGLpK8HS~~HUE`lxr$1c& zx~T?f>km?cgEIX&;fj)GA^ioWKj2do+KuSoHGC_!(!X=6pQ5C?@O^&P=`bdtzogki z*ucEC3)^p`@$|K7xLE8m8|NSVV=GhLv#_(aDooA&8vo2olAM62TtH?S%FnollzAuY zmFd_7?~x%MG47T&+~;OI){wST{cI~e=BTQ#10YL76_)Up=@e&d6d8sLe0C2`sPmWH zFDcExE{kV18O){L>o!Ntmv!MqLjZ>Kwt5B3;1f`{;Kljo=mIy|7p$bA#;_3F$dD<v zAq{Fwg@GXrY8-&3V~(a*F~>r&ZVlbiptY1b&%>OBExCY&y>NcElN5pJ<nPv%asIh3 zU&7@N*GsTk{;G0|As0(6p2<Cc*PaPQ=5+e7j4^I5uMaJtbGxRUYz9>=n3PF9jzSl; zsajGUB<Z66{_&IM!23*glJ1D`<c5V`a`ZF?sW(8K24kD!uN6D%r%@WRj>%~vb&bdv zS#M$SUAMOi^3S?5N+Ty7^CKL|l8qtSsu{AU!LkdoV=Z7Dz_)829rsX7?AtY;XL1{b zl_jMzc^dVDw7HL_urlY;OG)6eG{KNx`Z)!b!LzEZl5&`P2UjbDRNY(5ODx-3fEdSe zy37gDn~>pse?3`gX<h7Q$&Y%=bP1Td*X1qwZ*MuJKrUUh>=alhlQN`QdZ}EpZUc5M zI*A5NHkr6oQl$L;Hbw)~l3JScY!;;ht(q~-qsZm~rJWS)KOAJ;k}zyqC$z&d01in> z$E|O&-taa)=-+GIylY4edlC21(s4y>f4PNU0lE=}`@pf4GZTyFZKq$;i;>KuXb-Kz z_)P&^&jxbFtqw#?%fX8~Y0JXwW?cCWgs0Uqy@E2)OhZZTX%LI+O-O;05T52pdXvCD zuJ<*Uo~$U~z0~WP1`VQ(hVDnT(nk$(%rx5MwiX?Lq$xZr`j965kY@?~Rm#qzwxNbL zN?EiW7pTBs+Qa31L(L0XvHg&ZP>jD2YiQq8@a_ie661{@F8?s>xu6XHJv~Fg>7=yh zvdzPOj&CW=`J^4INq=RZSOT~P4>RSzf%aFKkAhcpS2y3L(rM+LLEVPQJZ$)(sB8QP 
zUalPQHE1gu<e!B?+OHL~!zJf9)cC0nBlR@GwdWcugKNQ=#GthMDmnmuqpxe>*=|s> zTA<aQYiuvsu@=IMzFk1a^9*=J$I}^&&&4E#M+q3{*9xzf(stjzO_u!b(f&o2ho}pr zH|T&rtBFS}hJC6x(`U&KS^1VUJovLZjox#nt5kGkGVp_a$>d{M7px_3nUL4Y*n;V- z{b8WPiONtfNe(~*Z)0-k6@N(%+oIn-!{^xfaFH4Xq+7S->nZ8k1CF%MbS71|v@gDC zFwhGYleDLh)h;qzhD|Hqq#3Q<830jy?3_iUM5MkII#V6<9@$^Cq<Kl^UJ9JZ{MAcF zKMfUzRaR91KX^wPv@j1J`kw6YJ85pctR^r8l+YD$#xCYDYt<wpuw=4ELn&2tRa3ox zD=3QpG{G>BwVrX46`fUGy~)hulk5-RCD#Os!L$XHl@1-LC6=kCNSo)VNNtabWz<bq zApYWFOP$7N4#jF3`x@!u%`5C*xL2;*-fT|Ox;0)5E3I2Vw$rFWdP!aC-Pd|#@5x}8 zn2BTRZF4gV=ne~lK`3N4qB^V7q;6VE_B84_vI;3Rk<)FKY6!<c*hO!ui~Ycx9J+;9 z&3g!MS=}VEfh$qBKAr=u+{B<)<EW!+n<sC@nSei6oZCr9F?gQH0C>uXw7FWIC;L5b zqhEd?r`*xCimbjB8M3v^JArY~6N4;r7Jd-vaxz5a0vdKjNL2vhi_tzL2dukVgjNDQ z(a*g!z!tJGxjrE(^K3m&UvG)g^;eNIdf+@$x&ny>c^`pGSi(T1UTUchQZrgpJc}=F zVE~t|H0d-F0V*4e8akm$$cm4KI{_{S1=bEI8iN=oSiAuln>*n=5ry=|kg^|yX$dH? zdr2AjE<Q(!1Ay{uG>uSDz6!0pZ2)UE4O(|SwRAra#FDS|B3C|9f=vKz+N7Tjyx3`z z@}#=56H}4t`poI-428B#T(7jnoS`n2*G%=tmvY6$_|y3FGyXgNT<q^CL%&n_d7!sS zrbed3Hjg=@Ltb{>dKufpyP7#ApP9^wcD|VBFE?x*($^J9R(snKcTo>CHK+GDg64@a z*><gAf-x<$0-;Xx#O`}>DZQiA?xJpm%K^HsPp|UQi=W4>ZfA6vhGBL3Hgxqe2C9`P zZj%j6wi~F?(B5KFpG3`*Ivj5EWR|3vCwCm=Hczp}begAhoIKC8bn!5sGN#p0;Wk~| z)MUCkk93=E%d&|Np*Xj+neNRp<8eRcF+J9|*pH~Q>FKy=9u+L4;m4G=nO@h6ou;>= zu+#KeSvJ$xQSLVV7RO-uJLCgl)a6uPPe^1Bj2XZk%|O?I^UR>-hhOfNjb^aJVKYM= zhuh3B)-}T&r*&ecRlR00Hw~G^9kp&V!sS6`r1x!`8KvhkqaC@OW{h{$jIBM!ZI)OW zoo30}(~zcHLqjjDGfO?6xy{nGhtD(1+?Lxc>v+*-mNTZ9<sI?`Ua$!t8nHBBR#*j{ zW<^JV&8*~&H!C|DC?!fvl=Wn{S=Bjfo*B1hxy^XT)OlutTT{$L$E|L&+8S*$t2>;X zW=-f^K<BHF=~`^<ZS~6B|NmKYFdmVA3~l*;ugm{<70gh|bgmHo>^-=Z_xx_E@SnA> z-x}JB;(p#k=kYoeiM1^}lW{P`H2SKrlDW6%?+gA<O#idUpDX<zH`u;)`#FVDHUGKN zUsU%O3;r{||A(djrmk6=oZZNDzwU5p!PHbuI^Xo3e_&2CJe&SN2jQQ_RkOZewY;YP z&cKkbRE?no*1=E51tqKh+yeuVtAUi{3|VHIsVUArtOlly)(Ju;5tig~XmL25d>Zl( zD8*h4w$BtyW+*z#)f)Yq;)uM0Q8y)bpINThK>v8^Rl@ocNon=S1=Ze=`FKi@{*g$| zX=HektZmWuHu^+AKxuU{_LJ2psAXtNF{m#mlwBd`q*mO_2$t9&DOs6RAzwC?TL4v? 
z7&Yn+%@ROXZw%IZf>Rd*>jX`20<JytIqFvb0^kvTaVF+2AqY6PAKGbfE?5;{Ks$%8 z{Goqh%p3Yjh|emKG2*m7!09;zYNwJNc{~LpE1=MywgrC*lDd>Dn-{-8we*i&noaj< zJTamnK<5XMEz)_^c+Aro8~3_EaytEtb8pBoMVtK9GgfG4WB>;IK>uEAXiZx`>8AsY robrQ|owa>aoRhN_X(Mf!48lrA?l?kCzkiF4Q8iUAs#=ol_WAz{i*386 diff --git a/vendor/golang.org/x/net/publicsuffix/data/text b/vendor/golang.org/x/net/publicsuffix/data/text deleted file mode 100644 index 124dcd61..00000000 --- a/vendor/golang.org/x/net/publicsuffix/data/text +++ /dev/null @@ -1 +0,0 @@ -billustrationionjukudoyamakeupowiathletajimageandsoundandvision-riopretobishimagentositecnologiabiocelotenkawabipanasonicatfoodnetworkinggroupperbirdartcenterprisecloudaccesscamdvrcampaniabirkenesoddtangenovarahkkeravjuegoshikikiraraholtalenishikatakazakindependent-revieweirbirthplaceu-1bitbucketrzynishikatsuragirlyuzawabitternidiscoverybjarkoybjerkreimdbaltimore-og-romsdalp1bjugnishikawazukamishihoronobeautydalwaysdatabaseballangenkainanaejrietisalatinabenogatabitorderblackfridaybloombergbauernishimerabloxcms3-website-us-west-2blushakotanishinomiyashironocparachutingjovikarateu-2bmoattachmentsalangenishinoomotegovtattoolforgerockartuzybmsalon-1bmwellbeingzoneu-3bnrwesteuropenairbusantiquesaltdalomzaporizhzhedmarkaratsuginamikatagamilanotairesistanceu-4bondigitaloceanspacesaludishangrilanciabonnishinoshimatsusakahoginankokubunjindianapolis-a-bloggerbookonlinewjerseyboomlahppiacenzachpomorskienishiokoppegardiskussionsbereichattanooganordkapparaglidinglassassinationalheritageu-north-1boschaefflerdalondonetskarelianceu-south-1bostik-serveronagasukevje-og-hornnesalvadordalibabalatinord-aurdalipaywhirlondrinaplesknsalzburgleezextraspace-to-rentalstomakomaibarabostonakijinsekikogentappssejnyaarparalleluxembourglitcheltenham-radio-opensocialorenskogliwicebotanicalgardeno-staginglobodoes-itcouldbeworldisrechtranakamurataiwanairforcechireadthedocsxeroxfinitybotanicgardenishitosashimizunaminamiawajikindianmarketinglogowestfalenishiwakindielddanuorrindigenamsskoganeindustriabotanyanagawa
llonieruchomoscienceandindustrynissandiegoddabouncemerckmsdnipropetrovskjervoyageorgeorgiabounty-fullensakerrypropertiesamegawaboutiquebecommerce-shopselectaxihuanissayokkaichintaifun-dnsaliasamnangerboutireservditchyouriparasiteboyfriendoftheinternetflixjavaldaostathellevangerbozen-sudtirolottokorozawabozen-suedtirolouvreisenissedalovepoparisor-fronisshingucciprianiigataipeidsvollovesickariyakumodumeloyalistoragebplaceducatorprojectcmembersampalermomahaccapooguybrandywinevalleybrasiliadboxosascoli-picenorddalpusercontentcp4bresciaokinawashirosatobamagazineuesamsclubartowestus2brindisibenikitagataikikuchikumagayagawalmartgorybristoloseyouriparliamentjeldsundivtasvuodnakaniikawatanagurabritishcolumbialowiezaganiyodogawabroadcastlebtimnetzlgloomy-routerbroadwaybroke-itvedestrandivttasvuotnakanojohanamakindlefrakkestadiybrokerbrothermesaverdeatnulmemergencyachtsamsungloppennebrowsersafetymarketsandnessjoenl-ams-1brumunddalublindesnesandoybrunelastxn--0trq7p7nnbrusselsandvikcoromantovalle-daostavangerbruxellesanfranciscofreakunekobayashikaoirmemorialucaniabryanskodjedugit-pagespeedmobilizeroticagliaricoharuovatlassian-dev-builderscbglugsjcbnpparibashkiriabrynewmexicoacharterbuzzwfarmerseinebwhalingmbhartiffany-2bzhitomirbzzcodyn-vpndnsantacruzsantafedjeffersoncoffeedbackdropocznordlandrudupontariobranconavstackasaokamikoaniikappudownloadurbanamexhibitioncogretakamatsukawacollectioncolognewyorkshirebungoonordre-landurhamburgrimstadynamisches-dnsantamariakecolonialwilliamsburgripeeweeklylotterycoloradoplateaudnedalncolumbusheycommunexus-3community-prochowicecomobaravendbambleborkapsicilyonagoyauthgear-stagingivestbyglandroverhallair-traffic-controlleyombomloabaths-heilbronnoysunddnslivegarsheiheijibigawaustraliaustinnfshostrolekamisatokaizukameyamatotakadaustevollivornowtv-infolldalolipopmcdircompanychipstmncomparemarkerryhotelsantoandrepbodynaliasnesoddenmarkhangelskjakdnepropetrovskiervaapsteigenflfannefrankfurtjxn--12cfi8ixb8lutskashibatakashimarshallstatebankashiharac
omsecaaskimitsubatamibuildingriwatarailwaycondoshichinohealth-carereformemsettlersanukindustriesteamfamberlevagangaviikanonjinfinitigotembaixadaconferenceconstructionconsuladogadollsaobernardomniweatherchanneluxuryconsultanthropologyconsultingroks-thisayamanobeokakegawacontactkmaxxn--12co0c3b4evalled-aostamayukinsuregruhostingrondarcontagematsubaravennaharimalborkashiwaracontemporaryarteducationalchikugodonnakaiwamizawashtenawsmppl-wawdev-myqnapcloudcontrolledogawarabikomaezakirunoopschlesischesaogoncartoonartdecologiacontractorskenconventureshinodearthickashiwazakiyosatokamachilloutsystemscloudsitecookingchannelsdvrdnsdojogaszkolancashirecifedexetercoolblogdnsfor-better-thanawassamukawatarikuzentakatairavpagecooperativano-frankivskygearapparochernigovernmentksatxn--1ck2e1bananarepublic-inquiryggeebinatsukigatajimidsundevelopmentatarantours3-external-1copenhagencyclopedichiropracticatholicaxiashorokanaiecoproductionsaotomeinforumzcorporationcorsicahcesuoloanswatch-and-clockercorvettenrissagaeroclubmedecincinnativeamericanantiquest-le-patron-k3sapporomuracosenzamamidorittoeigersundynathomebuiltwithdarkasserverrankoshigayaltakasugaintelligencecosidnshome-webservercellikescandypoppdaluzerncostumedicallynxn--1ctwolominamatargets-itlon-2couchpotatofriesardegnarutomobegetmyiparsardiniacouncilvivanovoldacouponsarlcozoracq-acranbrookuwanalyticsarpsborgrongausdalcrankyowariasahikawatchandclockasukabeauxartsandcraftsarufutsunomiyawakasaikaitabashijonawatecrdyndns-at-homedepotaruinterhostsolutionsasayamatta-varjjatmpartinternationalfirearmsaseboknowsitallcreditcardyndns-at-workshoppingrossetouchigasakitahiroshimansionsaskatchewancreditunioncremonashgabadaddjaguarqcxn--1lqs03ncrewhmessinarashinomutashinaintuitoyosatoyokawacricketnedalcrimeast-kazakhstanangercrotonecrownipartsassarinuyamashinazawacrsaudacruisesauheradyndns-blogsitextilegnicapetownnews-stagingroundhandlingroznycuisinellancasterculturalcentertainmentoyotapartysvardocuneocupcakecuritibabymilk3curvallee-d-aosteinkje
rusalempresashibetsurugashimaringatlantajirinvestmentsavannahgacutegirlfriendyndns-freeboxoslocalzonecymrulvikasumigaurawa-mazowszexnetlifyinzairtrafficplexus-1cyonabarumesswithdnsaveincloudyndns-homednsaves-the-whalessandria-trani-barletta-andriatranibarlettaandriacyouthruherecipescaracaltanissettaishinomakilovecollegefantasyleaguernseyfembetsukumiyamazonawsglobalacceleratorahimeshimabaridagawatchesciencecentersciencehistoryfermockasuyamegurownproviderferraraferraris-a-catererferrerotikagoshimalopolskanlandyndns-picsaxofetsundyndns-remotewdyndns-ipasadenaroyfgujoinvilleitungsenfhvalerfidontexistmein-iservschulegallocalhostrodawarafieldyndns-serverdalfigueresindevicenzaolkuszczytnoipirangalsaceofilateliafilegear-augustowhoswholdingsmall-webthingscientistordalfilegear-debianfilegear-gbizfilegear-iefilegear-jpmorganfilegear-sg-1filminamiechizenfinalfinancefineartscrapper-sitefinlandyndns-weblikes-piedmonticellocus-4finnoyfirebaseappaviancarrdyndns-wikinkobearalvahkijoetsuldalvdalaskanittedallasalleasecuritytacticschoenbrunnfirenetoystre-slidrettozawafirenzefirestonefirewebpaascrappingulenfirmdaleikangerfishingoldpoint2thisamitsukefitjarvodkafjordyndns-workangerfitnessettlementozsdellogliastradingunmanxn--1qqw23afjalerfldrvalleeaosteflekkefjordyndns1flesberguovdageaidnunjargaflickragerogerscrysecretrosnubar0flierneflirfloginlinefloppythonanywhereggio-calabriafloraflorencefloridatsunangojomedicinakamagayahabackplaneapplinzis-a-celticsfanfloripadoval-daostavalleyfloristanohatakahamalselvendrellflorokunohealthcareerscwienflowerservehalflifeinsurancefltrani-andria-barletta-trani-andriaflynnhosting-clusterfnchiryukyuragifuchungbukharanzanfndynnschokokekschokoladenfnwkaszubytemarkatowicefoolfor-ourfor-somedio-campidano-mediocampidanomediofor-theaterforexrothachijolsterforgotdnservehttpbin-butterforli-cesena-forlicesenaforlillesandefjordynservebbscholarshipschoolbusinessebyforsaleirfjordynuniversityforsandasuolodingenfortalfortefortmissoulangevagrigentomologyeonggiehtavuoatna
gahamaroygardencowayfortworthachinoheavyfosneservehumourfotraniandriabarlettatraniandriafoxfordecampobassociatest-iserveblogsytemp-dnserveirchitachinakagawashingtondchernivtsiciliafozfr-par-1fr-par-2franamizuhobby-sitefrancaiseharafranziskanerimalvikatsushikabedzin-addrammenuorochesterfredrikstadtvserveminecraftranoyfreeddnsfreebox-oservemp3freedesktopfizerfreemasonryfreemyiphosteurovisionfreesitefreetlservep2pgfoggiafreiburgushikamifuranorfolkebibleksvikatsuyamarugame-hostyhostingxn--2m4a15efrenchkisshikirkeneservepicservequakefreseniuscultureggio-emilia-romagnakasatsunairguardiannakadomarinebraskaunicommbankaufentigerfribourgfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfroganservesarcasmatartanddesignfrognfrolandynv6from-akrehamnfrom-alfrom-arfrom-azurewebsiteshikagamiishibukawakepnoorfrom-capitalonewportransipharmacienservicesevastopolefrom-coalfrom-ctranslatedynvpnpluscountryestateofdelawareclaimschoolsztynsettsupportoyotomiyazakis-a-candidatefrom-dchitosetodayfrom-dediboxafrom-flandersevenassisienarvikautokeinoticeablewismillerfrom-gaulardalfrom-hichisochikuzenfrom-iafrom-idyroyrvikingruenoharafrom-ilfrom-in-berlindasewiiheyaizuwakamatsubushikusakadogawafrom-ksharpharmacyshawaiijimarcheapartmentshellaspeziafrom-kyfrom-lanshimokawafrom-mamurogawatsonfrom-mdfrom-medizinhistorischeshimokitayamattelekommunikationfrom-mifunefrom-mnfrom-modalenfrom-mshimonitayanagit-reposts-and-telecommunicationshimonosekikawafrom-mtnfrom-nchofunatoriginstantcloudfrontdoorfrom-ndfrom-nefrom-nhktistoryfrom-njshimosuwalkis-a-chefarsundyndns-mailfrom-nminamifuranofrom-nvalleedaostefrom-nynysagamiharafrom-ohdattorelayfrom-oketogolffanshimotsukefrom-orfrom-padualstackazoologicalfrom-pratogurafrom-ris-a-conservativegashimotsumayfirstockholmestrandfrom-schmidtre-gauldalfrom-sdscloudfrom-tnfrom-txn--2scrj9chonanbunkyonanaoshimakanegasakikug
awaltervistailscaleforcefrom-utsiracusaikirovogradoyfrom-vald-aostarostwodzislawildlifestylefrom-vtransportefrom-wafrom-wiardwebview-assetshinichinanfrom-wvanylvenneslaskerrylogisticshinjournalismartlabelingfrom-wyfrosinonefrostalowa-wolawafroyal-commissionfruskydivingfujiiderafujikawaguchikonefujiminokamoenairkitapps-auction-rancherkasydneyfujinomiyadattowebhoptogakushimotoganefujiokayamandalfujisatoshonairlinedre-eikerfujisawafujishiroishidakabiratoridedyn-berlincolnfujitsuruokazakiryuohkurafujiyoshidavvenjargap-east-1fukayabeardubaiduckdnsncfdfukuchiyamadavvesiidappnodebalancertmgrazimutheworkpccwilliamhillfukudomigawafukuis-a-cpalacefukumitsubishigakisarazure-mobileirvikazteleportlligatransurlfukuokakamigaharafukuroishikarikaturindalfukusakishiwadazaifudaigokaseljordfukuyamagatakaharunusualpersonfunabashiriuchinadafunagatakahashimamakisofukushimangonnakatombetsumy-gatewayfunahashikamiamakusatsumasendaisenergyfundaciofunkfeuerfuoiskujukuriyamangyshlakasamatsudoomdnstracefuosskoczowinbar1furubirafurudonostiaafurukawajimaniwakuratefusodegaurafussaintlouis-a-anarchistoireggiocalabriafutabayamaguchinomihachimanagementrapaniizafutboldlygoingnowhere-for-morenakatsugawafuttsurutaharafuturecmshinjukumamotoyamashikefuturehostingfuturemailingfvghamurakamigoris-a-designerhandcraftedhandsonyhangglidinghangoutwentehannanmokuizumodenaklodzkochikuseihidorahannorthwesternmutualhanyuzenhapmircloudletshintokushimahappounzenharvestcelebrationhasamap-northeast-3hasaminami-alpshintomikasaharahashbangryhasudahasura-apphiladelphiaareadmyblogspotrdhasvikfh-muensterhatogayahoooshikamaishimofusartshinyoshitomiokamisunagawahatoyamazakitakatakanabeatshiojirishirifujiedahatsukaichikaiseiyoichimkentrendhostinghattfjelldalhayashimamotobusellfylkesbiblackbaudcdn-edgestackhero-networkisboringhazuminobushistoryhelplfinancialhelsinkitakyushuaiahembygdsforbundhemneshioyanaizuerichardlimanowarudahemsedalhepforgeblockshirahamatonbetsurgeonshalloffameiwamasoyheroyhetemlbfanhgtvaohigashiagatsumagoiania
higashichichibuskerudhigashihiroshimanehigashiizumozakitamigrationhigashikagawahigashikagurasoedahigashikawakitaaikitamotosunndalhigashikurumeeresinstaginghigashimatsushimarburghigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycleshirakokonoehigashinarusells-for-lesshiranukamitondabayashiogamagoriziahigashinehigashiomitamanortonsberghigashiosakasayamanakakogawahigashishirakawamatakanezawahigashisumiyoshikawaminamiaikitanakagusukumodernhigashitsunosegawahigashiurausukitashiobarahigashiyamatokoriyamanashifteditorxn--30rr7yhigashiyodogawahigashiyoshinogaris-a-doctorhippyhiraizumisatohnoshoohirakatashinagawahiranairportland-4-salernogiessennanjobojis-a-financialadvisor-aurdalhirarahiratsukaerusrcfastlylbanzaicloudappspotagerhirayaitakaokalmykiahistorichouseshiraois-a-geekhakassiahitachiomiyagildeskaliszhitachiotagonohejis-a-greenhitraeumtgeradegreehjartdalhjelmelandholeckodairaholidayholyhomegoodshiraokamitsuehomeiphilatelyhomelinkyard-cloudjiffyresdalhomelinuxn--32vp30hachiojiyahikobierzycehomeofficehomesecuritymacaparecidahomesecuritypchoseikarugamvikarlsoyhomesenseeringhomesklepphilipsynology-diskstationhomeunixn--3bst00minamiiserniahondahongooglecodebergentinghonjyoitakarazukaluganskharkivaporcloudhornindalhorsells-for-ustkanmakiwielunnerhortendofinternet-dnshiratakahagitapphoenixn--3ds443ghospitalhoteleshishikuis-a-guruhotelwithflightshisognehotmailhoyangerhoylandetakasagophonefosshisuifuettertdasnetzhumanitieshitaramahungryhurdalhurumajis-a-hard-workershizukuishimogosenhyllestadhyogoris-a-hunterhyugawarahyundaiwafuneis-into-carsiiitesilkharkovaresearchaeologicalvinklein-the-bandairtelebitbridgestoneenebakkeshibechambagricultureadymadealstahaugesunderseaportsinfolionetworkdalaheadjudygarlandis-into-cartoonsimple-urlis-into-gamesserlillyis-leetrentin-suedtirolis-lostre-toteneis-a-lawyeris-not-certifiedis-savedis-slickhersonis-uberleetrentino-a-adigeis-very-badajozis-a-liberalis-very-evillageis-very-goodyearis-very-niceis-very-sweetpepperugiais-with-thebandovr
e-eikerisleofmanaustdaljellybeanjenv-arubahccavuotnagaragusabaerobaticketsirdaljeonnamerikawauejetztrentino-aadigejevnakershusdecorativeartslupskhmelnytskyivarggatrentino-alto-adigejewelryjewishartgalleryjfkhplaystation-cloudyclusterjgorajlljls-sto1jls-sto2jls-sto3jmphotographysiojnjaworznospamproxyjoyentrentino-altoadigejoyokaichibajddarchitecturealtorlandjpnjprslzjurkotohiradomainstitutekotourakouhokutamamurakounosupabasembokukizunokunimilitarykouyamarylhurstjordalshalsenkouzushimasfjordenkozagawakozakis-a-llamarnardalkozowindowskrakowinnersnoasakatakkokamiminersokndalkpnkppspbarcelonagawakkanaibetsubamericanfamilyds3-fips-us-gov-west-1krasnikahokutokashikis-a-musiciankrasnodarkredstonekrelliankristiansandcatsolarssonkristiansundkrodsheradkrokstadelvalle-aostatic-accessolognekryminamiizukaminokawanishiaizubangekumanotteroykumatorinovecoregontrailroadkumejimashikis-a-nascarfankumenantokonamegatakatoris-a-nursells-itrentin-sud-tirolkunisakis-a-painteractivelvetrentin-sudtirolkunitachiaraindropilotsolundbecknx-serversellsyourhomeftphxn--3e0b707ekunitomigusukuleuvenetokigawakunneppuboliviajessheimpertrixcdn77-secureggioemiliaromagnamsosnowiechristiansburgminakamichiharakunstsammlungkunstunddesignkuokgroupimientaketomisatoolsomakurehabmerkurgankurobeeldengeluidkurogimimatakatsukis-a-patsfankuroisoftwarezzoologykuromatsunais-a-personaltrainerkuronkurotakikawasakis-a-photographerokussldkushirogawakustanais-a-playershiftcryptonomichigangwonkusupersalezajskomakiyosemitekutchanelkutnowruzhgorodeokuzumakis-a-republicanonoichinomiyakekvafjordkvalsundkvamscompute-1kvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspdnsomnatalkzmisakis-a-soxfanmisasaguris-a-studentalmisawamisconfusedmishimasudamissilemisugitokuyamatsumaebashikshacknetrentino-sued-tirolmitakeharamitourismilemitoyoakemiuramiyazurecontainerdpolicemiyotamatsukuris-a-teacherkassyno-dshowamjondalenmonstermontrealestatefarmequipmentrentino-suedtirolmonza-brianzapposor-odalmonza-e-della-brianzaptokyotangotpantheonsit
emonzabrianzaramonzaebrianzamonzaedellabrianzamoonscalebookinghostedpictetrentinoa-adigemordoviamoriyamatsumotofukemoriyoshiminamiashigaramormonmouthachirogatakamoriokakudamatsuemoroyamatsunomortgagemoscowiosor-varangermoseushimodatemosjoenmoskenesorfoldmossorocabalena-devicesorreisahayakawakamiichikawamisatottoris-a-techietis-a-landscaperspectakasakitchenmosvikomatsushimarylandmoteginowaniihamatamakinoharamoviemovimientolgamozilla-iotrentinoaadigemtranbytomaritimekeepingmuginozawaonsensiositemuikaminoyamaxunispacemukoebenhavnmulhouseoullensvanguardmunakatanemuncienciamuosattemupinbarclaycards3-sa-east-1murmanskomforbar2murotorcraftrentinoalto-adigemusashinoharamuseetrentinoaltoadigemuseumverenigingmusicargodaddyn-o-saurlandesortlandmutsuzawamy-wanggoupilemyactivedirectorymyamazeplaymyasustor-elvdalmycdmycloudnsoruminamimakis-a-rockstarachowicemydattolocalcertificationmyddnsgeekgalaxymydissentrentinos-tirolmydobissmarterthanyoumydrobofageologymydsoundcastronomy-vigorlicemyeffectrentinostirolmyfastly-terrariuminamiminowamyfirewalledreplittlestargardmyforuminamioguni5myfritzmyftpaccessouthcarolinaturalhistorymuseumcentermyhome-servermyjinomykolaivencloud66mymailermymediapchristmasakillucernemyokohamamatsudamypepinkommunalforbundmypetsouthwest1-uslivinghistorymyphotoshibalashovhadanorth-kazakhstanmypicturestaurantrentinosud-tirolmypsxn--3pxu8kommunemysecuritycamerakermyshopblocksowamyshopifymyspreadshopwarendalenugmythic-beastspectruminamisanrikubetsuppliesoomytis-a-bookkeepermaritimodspeedpartnermytuleap-partnersphinxn--41amyvnchromediatechnologymywirepaircraftingvollohmusashimurayamashikokuchuoplantationplantspjelkavikomorotsukagawaplatformsharis-a-therapistoiaplatter-appinokofuefukihaboromskogplatterpioneerplazaplcube-serversicherungplumbingoplurinacionalpodhalepodlasiellaktyubinskiptveterinairealmpmnpodzonepohlpoivronpokerpokrovskomvuxn--3hcrj9choyodobashichikashukujitawaraumalatvuopmicrosoftbankarmoypoliticarrierpolitiendapolkowicepoltavalle-d-aostaticspydebergpom
orzeszowitdkongsbergponpesaro-urbino-pesarourbinopesaromasvuotnarusawapordenonepornporsangerporsangugeporsgrunnanyokoshibahikariwanumatakinouepoznanpraxis-a-bruinsfanprdpreservationpresidioprgmrprimetelemarkongsvingerprincipeprivatizehealthinsuranceprofesionalprogressivestfoldpromombetsupplypropertyprotectionprotonetrentinosued-tirolprudentialpruszkowithgoogleapiszprvcyberprzeworskogpulawypunyufuelveruminamiuonumassa-carrara-massacarraramassabuyshousesopotrentino-sud-tirolpupugliapussycateringebuzentsujiiepvhadselfiphdfcbankazunoticiashinkamigototalpvtrentinosuedtirolpwchungnamdalseidsbergmodellingmxn--11b4c3dray-dnsupdaterpzqhaebaruericssongdalenviknakayamaoris-a-cubicle-slavellinodeobjectshinshinotsurfashionstorebaselburguidefinimamateramochizukimobetsumidatlantichirurgiens-dentistes-en-franceqldqotoyohashimotoshimatsuzakis-an-accountantshowtimelbourneqponiatowadaqslgbtrentinsud-tirolqualifioappippueblockbusternopilawaquickconnectrentinsudtirolquicksytesrhtrentinsued-tirolquipelementsrltunestuff-4-saletunkonsulatrobeebyteappigboatsmolaquilanxessmushcdn77-sslingturystykaniepcetuscanytushuissier-justicetuvalleaostaverntuxfamilytwmailvestvagoyvevelstadvibo-valentiavibovalentiavideovillastufftoread-booksnestorfjordvinnicasadelamonedagestangevinnytsiavipsinaappiwatevirginiavirtual-uservecounterstrikevirtualcloudvirtualservervirtualuserveexchangevirtuelvisakuhokksundviterbolognagasakikonaikawagoevivianvivolkenkundenvixn--42c2d9avlaanderennesoyvladikavkazimierz-dolnyvladimirvlogintoyonezawavminanovologdanskonyveloftrentino-stirolvolvolkswagentstuttgartrentinsuedtirolvolyngdalvoorlopervossevangenvotevotingvotoyonovps-hostrowiecircustomer-ocimmobilienwixsitewloclawekoobindalwmcloudwmflabsurnadalwoodsidelmenhorstabackyardsurreyworse-thandawowithyoutuberspacekitagawawpdevcloudwpenginepoweredwphostedmailwpmucdnpixolinodeusercontentrentinosudtirolwpmudevcdnaccessokanagawawritesthisblogoipizzawroclawiwatsukiyonoshiroomgwtcirclerkstagewtfastvps-serverisignwuozuwzmiuwajimaxn--4gb
riminingxn--4it168dxn--4it797kooris-a-libertarianxn--4pvxs4allxn--54b7fta0ccivilaviationredumbrellajollamericanexpressexyxn--55qw42gxn--55qx5dxn--5dbhl8dxn--5js045dxn--5rtp49civilisationrenderxn--5rtq34koperviklabudhabikinokawachinaganoharamcocottempurlxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264civilizationthewifiatmallorcafederation-webspacexn--80aaa0cvacationsusonoxn--80adxhksuzakananiimiharuxn--80ao21axn--80aqecdr1axn--80asehdbarclays3-us-east-2xn--80aswgxn--80aukraanghkembuchikujobservableusercontentrevisohughestripperxn--8dbq2axn--8ltr62koryokamikawanehonbetsuwanouchijiwadeliveryxn--8pvr4uxn--8y0a063axn--90a1affinitylotterybnikeisenbahnxn--90a3academiamicable-modemoneyxn--90aeroportalabamagasakishimabaraffleentry-snowplowiczeladzxn--90aishobarakawaharaoxn--90amckinseyxn--90azhytomyrxn--9dbhblg6dietritonxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byandexcloudxn--asky-iraxn--aurskog-hland-jnbarefootballooningjerstadgcapebretonamicrolightingjesdalombardiadembroideryonagunicloudiherokuappanamasteiermarkaracoldwarszawauthgearappspacehosted-by-previderxn--avery-yuasakuragawaxn--b-5gaxn--b4w605ferdxn--balsan-sdtirol-nsbsuzukanazawaxn--bck1b9a5dre4civilwarmiasadoesntexisteingeekarpaczest-a-la-maisondre-landrayddns5yxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyaotsurgeryxn--bjddar-ptargithubpreviewsaitohmannore-og-uvdalxn--blt-elabourxn--bmlo-graingerxn--bod-2naturalsciencesnaturellesuzukis-an-actorxn--bozen-sdtirol-2obanazawaxn--brnny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigation-acornxn--brum-voagatroandinosaureportrentoyonakagyokutoyakomaganexn--btsfjord-9zaxn--bulsan-sdtirol-nsbaremetalpha-myqnapcloud9guacuiababia-goracleaningitpagexlimoldell-ogliastraderxn--c1avgxn--c2br7gxn--c3s14mincomcastreserve-onlinexn--cck2b3bargainstances3-us-gov-west-1xn--cckwcxetdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--cg4b
kis-an-actresshwindmillxn--ciqpnxn--clchc0ea0b2g2a9gcdxn--comunicaes-v6a2oxn--correios-e-telecomunicaes-ghc29axn--czr694barreaudiblebesbydgoszczecinemagnethnologyoriikaragandauthordalandroiddnss3-ap-southeast-2ix4432-balsan-suedtirolimiteddnskinggfakefurniturecreationavuotnaritakoelnayorovigotsukisosakitahatakahatakaishimoichinosekigaharaurskog-holandingitlaborxn--czrs0trogstadxn--czru2dxn--czrw28barrel-of-knowledgeappgafanquanpachicappacificurussiautomotivelandds3-ca-central-16-balsan-sudtirollagdenesnaaseinet-freaks3-ap-southeast-123websiteleaf-south-123webseiteckidsmynasushiobarackmazerbaijan-mayen-rootaribeiraogakibichuobiramusementdllpages3-ap-south-123sitewebhareidfjordvagsoyerhcloudd-dnsiskinkyolasiteastcoastaldefenceastus2038xn--d1acj3barrell-of-knowledgecomputerhistoryofscience-fictionfabricafjs3-us-west-1xn--d1alfaromeoxn--d1atromsakegawaxn--d5qv7z876clanbibaidarmeniaxn--davvenjrga-y4axn--djrs72d6uyxn--djty4kosaigawaxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry-iraxn--e1a4cldmailukowhitesnow-dnsangohtawaramotoineppubtlsanjotelulubin-brbambinagisobetsuitagajoburgjerdrumcprequalifymein-vigorgebetsukuibmdeveloperauniteroizumizakinderoyomitanobninskanzakiyokawaraustrheimatunduhrennebulsan-suedtirololitapunk123kotisivultrobjectselinogradimo-siemenscaledekaascolipiceno-ipifony-1337xn--eckvdtc9dxn--efvn9svalbardunloppaderbornxn--efvy88hagakhanamigawaxn--ehqz56nxn--elqq16hagebostadxn--eveni-0qa01gaxn--f6qx53axn--fct429kosakaerodromegallupaasdaburxn--fhbeiarnxn--finny-yuaxn--fiq228c5hsvchurchaseljeepsondriodejaneirockyotobetsuliguriaxn--fiq64barsycenterprisesakievennodesadistcgrouplidlugolekagaminord-frontierxn--fiqs8sveioxn--fiqz9svelvikoninjambylxn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesena-fcbssvizzeraxn--forlcesena-c8axn--fpcrj9c3dxn--frde-grandrapidsvn-repostorjcloud-ver-jpchowderxn--frna-woaraisaijosoyroroswedenxn--frya-hraxn--fzc2c9e2cleverappsannanxn--fzys8d69uvgmailxn--g2xx48clicketcloudcontrolapparmatsuuraxn--gckr3f0fauskedsm
okorsetagayaseralingenoamishirasatogliattipschulserverxn--gecrj9clickrisinglesannohekinannestadraydnsanokaruizawaxn--ggaviika-8ya47haibarakitakamiizumisanofidelitysfjordxn--gildeskl-g0axn--givuotna-8yasakaiminatoyookaneyamazoexn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-an-anarchistoricalsocietysnesigdalxn--gmqw5axn--gnstigbestellen-zvbrplsbxn--45br5cylxn--gnstigliefern-wobihirosakikamijimatsushigexn--h-2failxn--h1aeghair-surveillancexn--h1ahnxn--h1alizxn--h2breg3eveneswidnicasacampinagrandebungotakadaemongolianxn--h2brj9c8clinichippubetsuikilatironporterxn--h3cuzk1digickoseis-a-linux-usershoujis-a-knightpointtohoboleslawieconomiastalbanshizuokamogawaxn--hbmer-xqaxn--hcesuolo-7ya35barsyonlinewhampshirealtychyattorneyagawakuyabukihokumakogeniwaizumiotsurugimbalsfjordeportexaskoyabeagleboardetroitskypecorivneatonoshoes3-eu-west-3utilitiesquare7xn--hebda8basicserversaillesjabbottateshinanomachildrensgardenhlfanhsbc66xn--hery-iraxn--hgebostad-g3axn--hkkinen-5waxn--hmmrfeasta-s4accident-prevention-aptibleangaviikadenaamesjevuemielnoboribetsuckswidnikkolobrzegersundxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyasugithubusercontentromsojamisonxn--io0a7is-an-artistgstagexn--j1adpkomonotogawaxn--j1aefbsbxn--1lqs71dyndns-office-on-the-webhostingrpassagensavonarviikamiokameokamakurazakiwakunigamihamadaxn--j1ael8basilicataniautoscanadaeguambulancentralus-2xn--j1amhakatanorthflankddiamondshinshiroxn--j6w193gxn--jlq480n2rgxn--jlq61u9w7basketballfinanzgorzeleccodespotenzakopanewspaperxn--jlster-byasuokannamihokkaidopaaskvollxn--jrpeland-54axn--jvr189miniserversusakis-a-socialistg-builderxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--45brj9cistrondheimperiaxn--koluokta-7ya57hakodatexn--kprw13dxn--kpry57dxn--kput3is-an-engineeringxn--krager-gyatominamibosogndalxn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjo
hka-hwab49jdevcloudfunctionsimplesitexn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyatsukanoyakagexn--kvnangen-k0axn--l-1fairwindswiebodzin-dslattuminamiyamashirokawanabeepilepsykkylvenicexn--l1accentureklamborghinikolaeventswinoujscienceandhistoryxn--laheadju-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52batochigifts3-us-west-2xn--lesund-huaxn--lgbbat1ad8jdfaststackschulplattformetacentrumeteorappassenger-associationxn--lgrd-poacctrusteexn--lhppi-xqaxn--linds-pramericanartrvestnestudioxn--lns-qlavagiskexn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liacliniquedapliexn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddeswisstpetersburgxn--mgb9awbfbx-ostrowwlkpmguitarschwarzgwangjuifminamidaitomanchesterxn--mgba3a3ejtrycloudflarevistaplestudynamic-dnsrvaroyxn--mgba3a4f16axn--mgba3a4fra1-deloittevaksdalxn--mgba7c0bbn0axn--mgbaakc7dvfstdlibestadxn--mgbaam7a8hakonexn--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00batsfjordiscordsays3-website-ap-northeast-1xn--mgbai9azgqp6jejuniperxn--mgbayh7gpalmaseratis-an-entertainerxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbpl2fhskosherbrookegawaxn--mgbqly7c0a67fbclintonkotsukubankarumaifarmsteadrobaknoluoktachikawakayamadridvallee-aosteroyxn--mgbqly7cvafr-1xn--mgbt3dhdxn--mgbtf8flapymntrysiljanxn--mgbtx2bauhauspostman-echocolatemasekd1xn--mgbx4cd0abbvieeexn--mix082fbxoschweizxn--mix891fedorainfraclouderaxn--mjndalen-64axn--mk0axin-vpnclothingdustdatadetectjmaxxxn--12c1fe0bradescotlandrrxn--mk1bu44cn-northwest-1xn--mkru45is-bykleclerchoshibuyachiyodancexn--mlatvuopmi-s4axn--mli-tlavangenxn--mlselv-iuaxn--moreke-juaxn--mori-qsakurais-certifiedxn--mosjen-eyawaraxn--mot-tlazioxn--mre-og-romsdal-qqbuseranishiaritakurashikis-foundationxn--msy-ula0hakubaghdadultravelchannelxn--mtta-vrjjat-k7aflakstadaokagakicks-assnasaarlandxn--muost-0qaxn--mxtq1minisitexn--ngbc5azdxn--ngbe9e0axn--ngbrxn--45q11citadelhicampinashikimi
nohostfoldnavyxn--nit225koshimizumakiyosunnydayxn--nmesjevuemie-tcbalestrandabergamoarekeymachineustarnbergxn--nnx388axn--nodessakyotanabelaudiopsysynology-dstreamlitappittsburghofficialxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeserveftplanetariuminamitanexn--nvuotna-hwaxn--nyqy26axn--o1achernihivgubsxn--o3cw4hakuis-a-democratravelersinsurancexn--o3cyx2axn--od0algxn--od0aq3belementorayoshiokanumazuryukuhashimojibxos3-website-ap-southeast-1xn--ogbpf8flatangerxn--oppegrd-ixaxn--ostery-fyawatahamaxn--osyro-wuaxn--otu796dxn--p1acfedorapeoplegoismailillehammerfeste-ipatriaxn--p1ais-gonexn--pgbs0dhlx3xn--porsgu-sta26fedoraprojectoyotsukaidoxn--pssu33lxn--pssy2uxn--q7ce6axn--q9jyb4cngreaterxn--qcka1pmcpenzaporizhzhiaxn--qqqt11minnesotaketakayamassivegridxn--qxa6axn--qxamsterdamnserverbaniaxn--rady-iraxn--rdal-poaxn--rde-ulaxn--rdy-0nabaris-into-animeetrentin-sued-tirolxn--rennesy-v1axn--rhkkervju-01afeiraquarelleasingujaratoyouraxn--rholt-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5naturbruksgymnxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31hakusanagochihayaakasakawaiishopitsitexn--rovu88bellevuelosangeles3-website-ap-southeast-2xn--rros-granvindafjordxn--rskog-uuaxn--rst-0naturhistorischesxn--rsta-framercanvasxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithaldenxn--s9brj9cnpyatigorskolecznagatorodoyxn--sandnessjen-ogbellunord-odalombardyn53xn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--ses554gxn--sgne-graphoxn--4dbgdty6citichernovtsyncloudrangedaluccarbonia-iglesias-carboniaiglesiascarboniaxn--skierv-utazasxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5natuurwetenschappenginexn--slt-elabcieszynh-servebeero-stageiseiroumuenchencoreapigeelvinckoshunantankmpspawnextdirectrentino-s-tirolxn--smla-hraxn--smna-gratangentlentapisa-geekosugexn--snase-nraxn--sndre-land-0cbeneventochiokinoshimaintenancebinordreisa-hockeynutazurestaticappspaceusercontentateyamaveroykenglandeltaitogitsumitakagi
izeasypanelblagrarchaeologyeongbuk0emmafann-arboretumbriamallamaceiobbcg123homepagefrontappchizip61123minsidaarborteaches-yogasawaracingroks-theatree123hjemmesidealerimo-i-rana4u2-localhistorybolzano-altoadigeometre-experts-comptables3-ap-northeast-123miwebcambridgehirn4t3l3p0rtarumizusawabogadobeaemcloud-fr123paginaweberkeleyokosukanrabruzzombieidskoguchikushinonsenasakuchinotsuchiurakawafaicloudineat-url-o-g-i-naval-d-aosta-valleyokote164-b-datacentermezproxyzgoraetnabudejjudaicadaquest-mon-blogueurodirumaceratabuseating-organicbcn-north-123saitamakawabartheshopencraftrainingdyniajuedischesapeakebayernavigationavoi234lima-cityeats3-ap-northeast-20001wwwedeployokozeastasiamunemurorangecloudplatform0xn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbentleyurihonjournalistjohnikonanporovnobserverxn--srfold-byaxn--srreisa-q1axn--srum-gratis-a-bulls-fanxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbeppublishproxyusuharavocatanzarowegroweiboltashkentatamotorsitestingivingjemnes3-eu-central-1kappleadpages-12hpalmspringsakerxn--stre-toten-zcbeskidyn-ip24xn--t60b56axn--tckweddingxn--tiq49xqyjelasticbeanstalkhmelnitskiyamarumorimachidaxn--tjme-hraxn--tn0agrocerydxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tirol-rzbestbuyshoparenagareyamaizurugbyenvironmentalconservationflashdrivefsnillfjordiscordsezjampaleoceanographics3-website-eu-west-1xn--trentin-sdtirol-7vbetainaboxfuseekloges3-website-sa-east-1xn--trentino-sd-tirol-c3bhzcasertainaioirasebastopologyeongnamegawafflecellclstagemologicaliforniavoues3-eu-west-1xn--trentino-sdtirol-szbielawalbrzycharitypedreamhostersvp4xn--trentinosd-tirol-rzbiellaakesvuemieleccebizenakanotoddeninoheguriitatebayashiibahcavuotnagaivuotnagaokakyotambabybluebitelevisioncilla-speziaxarnetbank8s3-eu-west-2xn--trentinosdtirol-7vbieszczadygeyachimataijiiyamanouchikuhokuryugasakitaurayasudaxn--trentinsd-tirol-6vbievat-band-campaignieznombrendlyngengerdalces3-website-us-east-1xn--trentinsdtirol-nsb
ifukagawalesundiscountypeformelhusgardeninomiyakonojorpelandiscourses3-website-us-west-1xn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvestre-slidrexn--uc0ay4axn--uist22halsakakinokiaxn--uisz3gxn--unjrga-rtarnobrzegyptianxn--unup4yxn--uuwu58axn--vads-jraxn--valle-aoste-ebbtularvikonskowolayangroupiemontexn--valle-d-aoste-ehboehringerikexn--valleaoste-e7axn--valledaoste-ebbvadsoccerxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctb-hostingxn--vermgensberatung-pwbigvalledaostaobaomoriguchiharag-cloud-championshiphoplixboxenirasakincheonishiazaindependent-commissionishigouvicasinordeste-idclkarasjohkamikitayamatsurindependent-inquest-a-la-masionishiharaxn--vestvgy-ixa6oxn--vg-yiabkhaziaxn--vgan-qoaxn--vgsy-qoa0jelenia-goraxn--vgu402cnsantabarbaraxn--vhquvestre-totennishiawakuraxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861biharstadotsubetsugaruhrxn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1cntjomeldaluroyxn--wgbl6axn--xhq521bihorologyusuisservegame-serverxn--xkc2al3hye2axn--xkc2dl3a5ee0hammarfeastafricaravantaaxn--y9a3aquariumintereitrentino-sudtirolxn--yer-znaumburgxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--4dbrk0cexn--ystre-slidre-ujbikedaejeonbukarasjokarasuyamarriottatsunoceanographiquehimejindependent-inquiryuufcfanishiizunazukindependent-panelomoliseminemrxn--zbx025dxn--zf0ao64axn--zf0avxlxn--zfr164bilbaogashimadachicagoboavistanbulsan-sudtirolbia-tempio-olbiatempioolbialystokkeliwebredirectme-south-1xnbayxz \ No newline at end of file diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go deleted file mode 100644 index d56e9e76..00000000 --- a/vendor/golang.org/x/net/publicsuffix/list.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:generate go run gen.go - -// Package publicsuffix provides a public suffix list based on data from -// https://publicsuffix.org/ -// -// A public suffix is one under which Internet users can directly register -// names. It is related to, but different from, a TLD (top level domain). -// -// "com" is a TLD (top level domain). Top level means it has no dots. -// -// "com" is also a public suffix. Amazon and Google have registered different -// siblings under that domain: "amazon.com" and "google.com". -// -// "au" is another TLD, again because it has no dots. But it's not "amazon.au". -// Instead, it's "amazon.com.au". -// -// "com.au" isn't an actual TLD, because it's not at the top level (it has -// dots). But it is an eTLD (effective TLD), because that's the branching point -// for domain name registrars. -// -// Another name for "an eTLD" is "a public suffix". Often, what's more of -// interest is the eTLD+1, or one more label than the public suffix. For -// example, browsers partition read/write access to HTTP cookies according to -// the eTLD+1. Web pages served from "amazon.com.au" can't read cookies from -// "google.com.au", but web pages served from "maps.google.com" can share -// cookies from "www.google.com", so you don't have to sign into Google Maps -// separately from signing into Google Web Search. Note that all four of those -// domains have 3 labels and 2 dots. The first two domains are each an eTLD+1, -// the last two are not (but share the same eTLD+1: "google.com"). -// -// All of these domains have the same eTLD+1: -// - "www.books.amazon.co.uk" -// - "books.amazon.co.uk" -// - "amazon.co.uk" -// -// Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk". -// -// There is no closed form algorithm to calculate the eTLD of a domain. -// Instead, the calculation is data driven. 
This package provides a -// pre-compiled snapshot of Mozilla's PSL (Public Suffix List) data at -// https://publicsuffix.org/ -package publicsuffix // import "golang.org/x/net/publicsuffix" - -// TODO: specify case sensitivity and leading/trailing dot behavior for -// func PublicSuffix and func EffectiveTLDPlusOne. - -import ( - "fmt" - "net/http/cookiejar" - "strings" -) - -// List implements the cookiejar.PublicSuffixList interface by calling the -// PublicSuffix function. -var List cookiejar.PublicSuffixList = list{} - -type list struct{} - -func (list) PublicSuffix(domain string) string { - ps, _ := PublicSuffix(domain) - return ps -} - -func (list) String() string { - return version -} - -// PublicSuffix returns the public suffix of the domain using a copy of the -// publicsuffix.org database compiled into the library. -// -// icann is whether the public suffix is managed by the Internet Corporation -// for Assigned Names and Numbers. If not, the public suffix is either a -// privately managed domain (and in practice, not a top level domain) or an -// unmanaged top level domain (and not explicitly mentioned in the -// publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN -// domains, "foo.dyndns.org" and "foo.blogspot.co.uk" are private domains and -// "cromulent" is an unmanaged top level domain. 
-// -// Use cases for distinguishing ICANN domains like "foo.com" from private -// domains like "foo.appspot.com" can be found at -// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases -func PublicSuffix(domain string) (publicSuffix string, icann bool) { - lo, hi := uint32(0), uint32(numTLD) - s, suffix, icannNode, wildcard := domain, len(domain), false, false -loop: - for { - dot := strings.LastIndex(s, ".") - if wildcard { - icann = icannNode - suffix = 1 + dot - } - if lo == hi { - break - } - f := find(s[1+dot:], lo, hi) - if f == notFound { - break - } - - u := uint32(nodes.get(f) >> (nodesBitsTextOffset + nodesBitsTextLength)) - icannNode = u&(1<<nodesBitsICANN-1) != 0 - u >>= nodesBitsICANN - u = children.get(u & (1<<nodesBitsChildren - 1)) - lo = u & (1<<childrenBitsLo - 1) - u >>= childrenBitsLo - hi = u & (1<<childrenBitsHi - 1) - u >>= childrenBitsHi - switch u & (1<<childrenBitsNodeType - 1) { - case nodeTypeNormal: - suffix = 1 + dot - case nodeTypeException: - suffix = 1 + len(s) - break loop - } - u >>= childrenBitsNodeType - wildcard = u&(1<<childrenBitsWildcard-1) != 0 - if !wildcard { - icann = icannNode - } - - if dot == -1 { - break - } - s = s[:dot] - } - if suffix == len(domain) { - // If no rules match, the prevailing rule is "*". - return domain[1+strings.LastIndex(domain, "."):], icann - } - return domain[suffix:], icann -} - -const notFound uint32 = 1<<32 - 1 - -// find returns the index of the node in the range [lo, hi) whose label equals -// label, or notFound if there is no such node. The range is assumed to be in -// strictly increasing node label order. -func find(label string, lo, hi uint32) uint32 { - for lo < hi { - mid := lo + (hi-lo)/2 - s := nodeLabel(mid) - if s < label { - lo = mid + 1 - } else if s == label { - return mid - } else { - hi = mid - } - } - return notFound -} - -// nodeLabel returns the label for the i'th node. 
-func nodeLabel(i uint32) string { - x := nodes.get(i) - length := x & (1<<nodesBitsTextLength - 1) - x >>= nodesBitsTextLength - offset := x & (1<<nodesBitsTextOffset - 1) - return text[offset : offset+length] -} - -// EffectiveTLDPlusOne returns the effective top level domain plus one more -// label. For example, the eTLD+1 for "foo.bar.golang.org" is "golang.org". -func EffectiveTLDPlusOne(domain string) (string, error) { - if strings.HasPrefix(domain, ".") || strings.HasSuffix(domain, ".") || strings.Contains(domain, "..") { - return "", fmt.Errorf("publicsuffix: empty label in domain %q", domain) - } - - suffix, _ := PublicSuffix(domain) - if len(domain) <= len(suffix) { - return "", fmt.Errorf("publicsuffix: cannot derive eTLD+1 for domain %q", domain) - } - i := len(domain) - len(suffix) - 1 - if domain[i] != '.' { - return "", fmt.Errorf("publicsuffix: invalid public suffix %q for domain %q", suffix, domain) - } - return domain[1+strings.LastIndex(domain[:i], "."):], nil -} - -type uint32String string - -func (u uint32String) get(i uint32) uint32 { - off := i * 4 - return (uint32(u[off])<<24 | - uint32(u[off+1])<<16 | - uint32(u[off+2])<<8 | - uint32(u[off+3])) -} - -type uint40String string - -func (u uint40String) get(i uint32) uint64 { - off := uint64(i * (nodesBits / 8)) - return uint64(u[off])<<32 | - uint64(u[off+1])<<24 | - uint64(u[off+2])<<16 | - uint64(u[off+3])<<8 | - uint64(u[off+4]) -} diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go deleted file mode 100644 index 6bdadcc4..00000000 --- a/vendor/golang.org/x/net/publicsuffix/table.go +++ /dev/null @@ -1,70 +0,0 @@ -// generated by go run gen.go; DO NOT EDIT - -package publicsuffix - -import _ "embed" - -const version = "publicsuffix.org's public_suffix_list.dat, git revision e248cbc92a527a166454afe9914c4c1b4253893f (2022-11-15T18:02:38Z)" - -const ( - nodesBits = 40 - nodesBitsChildren = 10 - nodesBitsICANN = 1 - nodesBitsTextOffset = 16 
- nodesBitsTextLength = 6 - - childrenBitsWildcard = 1 - childrenBitsNodeType = 2 - childrenBitsHi = 14 - childrenBitsLo = 14 -) - -const ( - nodeTypeNormal = 0 - nodeTypeException = 1 - nodeTypeParentOnly = 2 -) - -// numTLD is the number of top level domains. -const numTLD = 1494 - -// text is the combined text of all labels. -// -//go:embed data/text -var text string - -// nodes is the list of nodes. Each node is represented as a 40-bit integer, -// which encodes the node's children, wildcard bit and node type (as an index -// into the children array), ICANN bit and text. -// -// The layout within the node, from MSB to LSB, is: -// -// [ 7 bits] unused -// [10 bits] children index -// [ 1 bits] ICANN bit -// [16 bits] text index -// [ 6 bits] text length -// -//go:embed data/nodes -var nodes uint40String - -// children is the list of nodes' children, the parent's wildcard bit and the -// parent's node type. If a node has no children then their children index -// will be in the range [0, 6), depending on the wildcard bit and node type. -// -// The layout within the uint32, from MSB to LSB, is: -// -// [ 1 bits] unused -// [ 1 bits] wildcard bit -// [ 2 bits] node type -// [14 bits] high nodes index (exclusive) of children -// [14 bits] low nodes index (inclusive) of children -// -//go:embed data/children -var children uint32String - -// max children 718 (capacity 1023) -// max text offset 32976 (capacity 65535) -// max text length 36 (capacity 63) -// max hi 9656 (capacity 16383) -// max lo 9651 (capacity 16383) diff --git a/vendor/golang.org/x/sys/plan9/asm.s b/vendor/golang.org/x/sys/plan9/asm.s deleted file mode 100644 index 06449ebf..00000000 --- a/vendor/golang.org/x/sys/plan9/asm.s +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -#include "textflag.h" - -TEXT ·use(SB),NOSPLIT,$0 - RET diff --git a/vendor/golang.org/x/sys/plan9/asm_plan9_386.s b/vendor/golang.org/x/sys/plan9/asm_plan9_386.s deleted file mode 100644 index bc5cab1f..00000000 --- a/vendor/golang.org/x/sys/plan9/asm_plan9_386.s +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -// -// System call support for 386, Plan 9 -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-32 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-44 - JMP syscall·Syscall6(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - JMP syscall·RawSyscall6(SB) - -TEXT ·seek(SB),NOSPLIT,$0-36 - JMP syscall·seek(SB) - -TEXT ·exit(SB),NOSPLIT,$4-4 - JMP syscall·exit(SB) diff --git a/vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s b/vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s deleted file mode 100644 index d3448e67..00000000 --- a/vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -// -// System call support for amd64, Plan 9 -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. 
- -TEXT ·Syscall(SB),NOSPLIT,$0-64 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-88 - JMP syscall·Syscall6(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) - -TEXT ·seek(SB),NOSPLIT,$0-56 - JMP syscall·seek(SB) - -TEXT ·exit(SB),NOSPLIT,$8-8 - JMP syscall·exit(SB) diff --git a/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s b/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s deleted file mode 100644 index afb7c0a9..00000000 --- a/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -// System call support for plan9 on arm - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-32 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-44 - JMP syscall·Syscall6(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - JMP syscall·RawSyscall6(SB) - -TEXT ·seek(SB),NOSPLIT,$0-36 - JMP syscall·exit(SB) diff --git a/vendor/golang.org/x/sys/plan9/const_plan9.go b/vendor/golang.org/x/sys/plan9/const_plan9.go deleted file mode 100644 index b4e85a3a..00000000 --- a/vendor/golang.org/x/sys/plan9/const_plan9.go +++ /dev/null @@ -1,70 +0,0 @@ -package plan9 - -// Plan 9 Constants - -// Open modes -const ( - O_RDONLY = 0 - O_WRONLY = 1 - O_RDWR = 2 - O_TRUNC = 16 - O_CLOEXEC = 32 - O_EXCL = 0x1000 -) - -// Rfork flags -const ( - RFNAMEG = 1 << 0 - RFENVG = 1 << 1 - RFFDG = 1 << 2 - RFNOTEG = 1 << 3 - RFPROC = 1 << 4 - RFMEM = 1 << 5 - RFNOWAIT = 1 << 6 - RFCNAMEG = 1 << 10 - RFCENVG = 1 << 11 - RFCFDG = 1 << 12 - RFREND = 1 << 13 - RFNOMNT = 1 << 14 -) - -// Qid.Type bits -const ( - QTDIR = 0x80 - QTAPPEND = 0x40 - QTEXCL = 0x20 
- QTMOUNT = 0x10 - QTAUTH = 0x08 - QTTMP = 0x04 - QTFILE = 0x00 -) - -// Dir.Mode bits -const ( - DMDIR = 0x80000000 - DMAPPEND = 0x40000000 - DMEXCL = 0x20000000 - DMMOUNT = 0x10000000 - DMAUTH = 0x08000000 - DMTMP = 0x04000000 - DMREAD = 0x4 - DMWRITE = 0x2 - DMEXEC = 0x1 -) - -const ( - STATMAX = 65535 - ERRMAX = 128 - STATFIXLEN = 49 -) - -// Mount and bind flags -const ( - MREPL = 0x0000 - MBEFORE = 0x0001 - MAFTER = 0x0002 - MORDER = 0x0003 - MCREATE = 0x0004 - MCACHE = 0x0010 - MMASK = 0x0017 -) diff --git a/vendor/golang.org/x/sys/plan9/dir_plan9.go b/vendor/golang.org/x/sys/plan9/dir_plan9.go deleted file mode 100644 index 0955e0c5..00000000 --- a/vendor/golang.org/x/sys/plan9/dir_plan9.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Plan 9 directory marshalling. See intro(5). - -package plan9 - -import "errors" - -var ( - ErrShortStat = errors.New("stat buffer too short") - ErrBadStat = errors.New("malformed stat buffer") - ErrBadName = errors.New("bad character in file name") -) - -// A Qid represents a 9P server's unique identification for a file. -type Qid struct { - Path uint64 // the file server's unique identification for the file - Vers uint32 // version number for given Path - Type uint8 // the type of the file (plan9.QTDIR for example) -} - -// A Dir contains the metadata for a file. 
-type Dir struct { - // system-modified data - Type uint16 // server type - Dev uint32 // server subtype - - // file data - Qid Qid // unique id from server - Mode uint32 // permissions - Atime uint32 // last read time - Mtime uint32 // last write time - Length int64 // file length - Name string // last element of path - Uid string // owner name - Gid string // group name - Muid string // last modifier name -} - -var nullDir = Dir{ - Type: ^uint16(0), - Dev: ^uint32(0), - Qid: Qid{ - Path: ^uint64(0), - Vers: ^uint32(0), - Type: ^uint8(0), - }, - Mode: ^uint32(0), - Atime: ^uint32(0), - Mtime: ^uint32(0), - Length: ^int64(0), -} - -// Null assigns special "don't touch" values to members of d to -// avoid modifying them during plan9.Wstat. -func (d *Dir) Null() { *d = nullDir } - -// Marshal encodes a 9P stat message corresponding to d into b -// -// If there isn't enough space in b for a stat message, ErrShortStat is returned. -func (d *Dir) Marshal(b []byte) (n int, err error) { - n = STATFIXLEN + len(d.Name) + len(d.Uid) + len(d.Gid) + len(d.Muid) - if n > len(b) { - return n, ErrShortStat - } - - for _, c := range d.Name { - if c == '/' { - return n, ErrBadName - } - } - - b = pbit16(b, uint16(n)-2) - b = pbit16(b, d.Type) - b = pbit32(b, d.Dev) - b = pbit8(b, d.Qid.Type) - b = pbit32(b, d.Qid.Vers) - b = pbit64(b, d.Qid.Path) - b = pbit32(b, d.Mode) - b = pbit32(b, d.Atime) - b = pbit32(b, d.Mtime) - b = pbit64(b, uint64(d.Length)) - b = pstring(b, d.Name) - b = pstring(b, d.Uid) - b = pstring(b, d.Gid) - b = pstring(b, d.Muid) - - return n, nil -} - -// UnmarshalDir decodes a single 9P stat message from b and returns the resulting Dir. -// -// If b is too small to hold a valid stat message, ErrShortStat is returned. -// -// If the stat message itself is invalid, ErrBadStat is returned. 
-func UnmarshalDir(b []byte) (*Dir, error) { - if len(b) < STATFIXLEN { - return nil, ErrShortStat - } - size, buf := gbit16(b) - if len(b) != int(size)+2 { - return nil, ErrBadStat - } - b = buf - - var d Dir - d.Type, b = gbit16(b) - d.Dev, b = gbit32(b) - d.Qid.Type, b = gbit8(b) - d.Qid.Vers, b = gbit32(b) - d.Qid.Path, b = gbit64(b) - d.Mode, b = gbit32(b) - d.Atime, b = gbit32(b) - d.Mtime, b = gbit32(b) - - n, b := gbit64(b) - d.Length = int64(n) - - var ok bool - if d.Name, b, ok = gstring(b); !ok { - return nil, ErrBadStat - } - if d.Uid, b, ok = gstring(b); !ok { - return nil, ErrBadStat - } - if d.Gid, b, ok = gstring(b); !ok { - return nil, ErrBadStat - } - if d.Muid, b, ok = gstring(b); !ok { - return nil, ErrBadStat - } - - return &d, nil -} - -// pbit8 copies the 8-bit number v to b and returns the remaining slice of b. -func pbit8(b []byte, v uint8) []byte { - b[0] = byte(v) - return b[1:] -} - -// pbit16 copies the 16-bit number v to b in little-endian order and returns the remaining slice of b. -func pbit16(b []byte, v uint16) []byte { - b[0] = byte(v) - b[1] = byte(v >> 8) - return b[2:] -} - -// pbit32 copies the 32-bit number v to b in little-endian order and returns the remaining slice of b. -func pbit32(b []byte, v uint32) []byte { - b[0] = byte(v) - b[1] = byte(v >> 8) - b[2] = byte(v >> 16) - b[3] = byte(v >> 24) - return b[4:] -} - -// pbit64 copies the 64-bit number v to b in little-endian order and returns the remaining slice of b. -func pbit64(b []byte, v uint64) []byte { - b[0] = byte(v) - b[1] = byte(v >> 8) - b[2] = byte(v >> 16) - b[3] = byte(v >> 24) - b[4] = byte(v >> 32) - b[5] = byte(v >> 40) - b[6] = byte(v >> 48) - b[7] = byte(v >> 56) - return b[8:] -} - -// pstring copies the string s to b, prepending it with a 16-bit length in little-endian order, and -// returning the remaining slice of b.. 
-func pstring(b []byte, s string) []byte { - b = pbit16(b, uint16(len(s))) - n := copy(b, s) - return b[n:] -} - -// gbit8 reads an 8-bit number from b and returns it with the remaining slice of b. -func gbit8(b []byte) (uint8, []byte) { - return uint8(b[0]), b[1:] -} - -// gbit16 reads a 16-bit number in little-endian order from b and returns it with the remaining slice of b. -func gbit16(b []byte) (uint16, []byte) { - return uint16(b[0]) | uint16(b[1])<<8, b[2:] -} - -// gbit32 reads a 32-bit number in little-endian order from b and returns it with the remaining slice of b. -func gbit32(b []byte) (uint32, []byte) { - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24, b[4:] -} - -// gbit64 reads a 64-bit number in little-endian order from b and returns it with the remaining slice of b. -func gbit64(b []byte) (uint64, []byte) { - lo := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - hi := uint32(b[4]) | uint32(b[5])<<8 | uint32(b[6])<<16 | uint32(b[7])<<24 - return uint64(lo) | uint64(hi)<<32, b[8:] -} - -// gstring reads a string from b, prefixed with a 16-bit length in little-endian order. -// It returns the string with the remaining slice of b and a boolean. If the length is -// greater than the number of bytes in b, the boolean will be false. -func gstring(b []byte) (string, []byte, bool) { - n, b := gbit16(b) - if int(n) > len(b) { - return "", b, false - } - return string(b[:n]), b[n:], true -} diff --git a/vendor/golang.org/x/sys/plan9/env_plan9.go b/vendor/golang.org/x/sys/plan9/env_plan9.go deleted file mode 100644 index 8f191800..00000000 --- a/vendor/golang.org/x/sys/plan9/env_plan9.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Plan 9 environment variables. 
- -package plan9 - -import ( - "syscall" -) - -func Getenv(key string) (value string, found bool) { - return syscall.Getenv(key) -} - -func Setenv(key, value string) error { - return syscall.Setenv(key, value) -} - -func Clearenv() { - syscall.Clearenv() -} - -func Environ() []string { - return syscall.Environ() -} - -func Unsetenv(key string) error { - return syscall.Unsetenv(key) -} diff --git a/vendor/golang.org/x/sys/plan9/errors_plan9.go b/vendor/golang.org/x/sys/plan9/errors_plan9.go deleted file mode 100644 index 65fe74d3..00000000 --- a/vendor/golang.org/x/sys/plan9/errors_plan9.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package plan9 - -import "syscall" - -// Constants -const ( - // Invented values to support what package os expects. - O_CREAT = 0x02000 - O_APPEND = 0x00400 - O_NOCTTY = 0x00000 - O_NONBLOCK = 0x00000 - O_SYNC = 0x00000 - O_ASYNC = 0x00000 - - S_IFMT = 0x1f000 - S_IFIFO = 0x1000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFBLK = 0x6000 - S_IFREG = 0x8000 - S_IFLNK = 0xa000 - S_IFSOCK = 0xc000 -) - -// Errors -var ( - EINVAL = syscall.NewError("bad arg in system call") - ENOTDIR = syscall.NewError("not a directory") - EISDIR = syscall.NewError("file is a directory") - ENOENT = syscall.NewError("file does not exist") - EEXIST = syscall.NewError("file already exists") - EMFILE = syscall.NewError("no free file descriptors") - EIO = syscall.NewError("i/o error") - ENAMETOOLONG = syscall.NewError("file name too long") - EINTR = syscall.NewError("interrupted") - EPERM = syscall.NewError("permission denied") - EBUSY = syscall.NewError("no free devices") - ETIMEDOUT = syscall.NewError("connection timed out") - EPLAN9 = syscall.NewError("not supported by plan 9") - - // The following errors do not correspond to any - // Plan 9 system messages. 
Invented to support - // what package os and others expect. - EACCES = syscall.NewError("access permission denied") - EAFNOSUPPORT = syscall.NewError("address family not supported by protocol") -) diff --git a/vendor/golang.org/x/sys/plan9/mkall.sh b/vendor/golang.org/x/sys/plan9/mkall.sh deleted file mode 100644 index 1650fbcc..00000000 --- a/vendor/golang.org/x/sys/plan9/mkall.sh +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# The plan9 package provides access to the raw system call -# interface of the underlying operating system. Porting Go to -# a new architecture/operating system combination requires -# some manual effort, though there are tools that automate -# much of the process. The auto-generated files have names -# beginning with z. -# -# This script runs or (given -n) prints suggested commands to generate z files -# for the current system. Running those commands is not automatic. -# This script is documentation more than anything else. -# -# * asm_${GOOS}_${GOARCH}.s -# -# This hand-written assembly file implements system call dispatch. -# There are three entry points: -# -# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr); -# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr); -# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr); -# -# The first and second are the standard ones; they differ only in -# how many arguments can be passed to the kernel. -# The third is for low-level use by the ForkExec wrapper; -# unlike the first two, it does not call into the scheduler to -# let it know that a system call is running. -# -# * syscall_${GOOS}.go -# -# This hand-written Go file implements system calls that need -# special handling and lists "//sys" comments giving prototypes -# for ones that can be auto-generated. 
Mksyscall reads those -# comments to generate the stubs. -# -# * syscall_${GOOS}_${GOARCH}.go -# -# Same as syscall_${GOOS}.go except that it contains code specific -# to ${GOOS} on one particular architecture. -# -# * types_${GOOS}.c -# -# This hand-written C file includes standard C headers and then -# creates typedef or enum names beginning with a dollar sign -# (use of $ in variable names is a gcc extension). The hardest -# part about preparing this file is figuring out which headers to -# include and which symbols need to be #defined to get the -# actual data structures that pass through to the kernel system calls. -# Some C libraries present alternate versions for binary compatibility -# and translate them on the way in and out of system calls, but -# there is almost always a #define that can get the real ones. -# See types_darwin.c and types_linux.c for examples. -# -# * zerror_${GOOS}_${GOARCH}.go -# -# This machine-generated file defines the system's error numbers, -# error strings, and signal numbers. The generator is "mkerrors.sh". -# Usually no arguments are needed, but mkerrors.sh will pass its -# arguments on to godefs. -# -# * zsyscall_${GOOS}_${GOARCH}.go -# -# Generated by mksyscall.pl; see syscall_${GOOS}.go above. -# -# * zsysnum_${GOOS}_${GOARCH}.go -# -# Generated by mksysnum_${GOOS}. -# -# * ztypes_${GOOS}_${GOARCH}.go -# -# Generated by godefs; see types_${GOOS}.c above. 
- -GOOSARCH="${GOOS}_${GOARCH}" - -# defaults -mksyscall="go run mksyscall.go" -mkerrors="./mkerrors.sh" -zerrors="zerrors_$GOOSARCH.go" -mksysctl="" -zsysctl="zsysctl_$GOOSARCH.go" -mksysnum= -mktypes= -run="sh" - -case "$1" in --syscalls) - for i in zsyscall*go - do - sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i - rm _$i - done - exit 0 - ;; --n) - run="cat" - shift -esac - -case "$#" in -0) - ;; -*) - echo 'usage: mkall.sh [-n]' 1>&2 - exit 2 -esac - -case "$GOOSARCH" in -_* | *_ | _) - echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 - exit 1 - ;; -plan9_386) - mkerrors= - mksyscall="go run mksyscall.go -l32 -plan9 -tags plan9,386" - mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h" - mktypes="XXX" - ;; -plan9_amd64) - mkerrors= - mksyscall="go run mksyscall.go -l32 -plan9 -tags plan9,amd64" - mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h" - mktypes="XXX" - ;; -plan9_arm) - mkerrors= - mksyscall="go run mksyscall.go -l32 -plan9 -tags plan9,arm" - mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h" - mktypes="XXX" - ;; -*) - echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2 - exit 1 - ;; -esac - -( - if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi - case "$GOOS" in - plan9) - syscall_goos="syscall_$GOOS.go" - if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos |gofmt >zsyscall_$GOOSARCH.go"; fi - ;; - esac - if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi - if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi - if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go |gofmt >ztypes_$GOOSARCH.go"; fi -) | $run diff --git a/vendor/golang.org/x/sys/plan9/mkerrors.sh b/vendor/golang.org/x/sys/plan9/mkerrors.sh deleted file mode 100644 index 526d04ab..00000000 --- a/vendor/golang.org/x/sys/plan9/mkerrors.sh +++ /dev/null @@ -1,246 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2009 The Go Authors. 
All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# Generate Go code listing errors and other #defined constant -# values (ENAMETOOLONG etc.), by asking the preprocessor -# about the definitions. - -unset LANG -export LC_ALL=C -export LC_CTYPE=C - -CC=${CC:-gcc} - -uname=$(uname) - -includes=' -#include <sys/types.h> -#include <sys/file.h> -#include <fcntl.h> -#include <dirent.h> -#include <sys/socket.h> -#include <netinet/in.h> -#include <netinet/ip.h> -#include <netinet/ip6.h> -#include <netinet/tcp.h> -#include <errno.h> -#include <sys/signal.h> -#include <signal.h> -#include <sys/resource.h> -' - -ccflags="$@" - -# Write go tool cgo -godefs input. -( - echo package plan9 - echo - echo '/*' - indirect="includes_$(uname)" - echo "${!indirect} $includes" - echo '*/' - echo 'import "C"' - echo - echo 'const (' - - # The gcc command line prints all the #defines - # it encounters while processing the input - echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags | - awk ' - $1 != "#define" || $2 ~ /\(/ || $3 == "" {next} - - $2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers - $2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next} - $2 ~ /^(SCM_SRCRT)$/ {next} - $2 ~ /^(MAP_FAILED)$/ {next} - - $2 !~ /^ETH_/ && - $2 !~ /^EPROC_/ && - $2 !~ /^EQUIV_/ && - $2 !~ /^EXPR_/ && - $2 ~ /^E[A-Z0-9_]+$/ || - $2 ~ /^B[0-9_]+$/ || - $2 ~ /^V[A-Z0-9]+$/ || - $2 ~ /^CS[A-Z0-9]/ || - $2 ~ /^I(SIG|CANON|CRNL|EXTEN|MAXBEL|STRIP|UTF8)$/ || - $2 ~ /^IGN/ || - $2 ~ /^IX(ON|ANY|OFF)$/ || - $2 ~ /^IN(LCR|PCK)$/ || - $2 ~ /(^FLU?SH)|(FLU?SH$)/ || - $2 ~ /^C(LOCAL|READ)$/ || - $2 == "BRKINT" || - $2 == "HUPCL" || - $2 == "PENDIN" || - $2 == "TOSTOP" || - $2 ~ /^PAR/ || - $2 ~ /^SIG[^_]/ || - $2 ~ /^O[CNPFP][A-Z]+[^_][A-Z]+$/ || - $2 ~ /^IN_/ || - $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || - $2 == 
"ICMPV6_FILTER" || - $2 == "SOMAXCONN" || - $2 == "NAME_MAX" || - $2 == "IFNAMSIZ" || - $2 ~ /^CTL_(MAXNAME|NET|QUERY)$/ || - $2 ~ /^SYSCTL_VERS/ || - $2 ~ /^(MS|MNT)_/ || - $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || - $2 ~ /^(O|F|FD|NAME|S|PTRACE|PT)_/ || - $2 ~ /^LINUX_REBOOT_CMD_/ || - $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || - $2 !~ "NLA_TYPE_MASK" && - $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ || - $2 ~ /^SIOC/ || - $2 ~ /^TIOC/ || - $2 !~ "RTF_BITS" && - $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ || - $2 ~ /^BIOC/ || - $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ || - $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|NOFILE|STACK)|RLIM_INFINITY/ || - $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || - $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL)$/ && - $2 ~ /^(BPF|DLT)_/ || - $2 !~ "WMESGLEN" && - $2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)} - $2 ~ /^__WCOREFLAG$/ {next} - $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} - - {next} - ' | sort - - echo ')' -) >_const.go - -# Pull out the error names for later. -errors=$( - echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' | - sort -) - -# Pull out the signal names for later. -signals=$( - echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT' | - sort -) - -# Again, writing regexps to a file. -echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' | - sort >_error.grep -echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT' | - sort >_signal.grep - -echo '// mkerrors.sh' "$@" -echo '// Code generated by the command above; DO NOT EDIT.' 
-echo -go tool cgo -godefs -- "$@" _const.go >_error.out -cat _error.out | grep -vf _error.grep | grep -vf _signal.grep -echo -echo '// Errors' -echo 'const (' -cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= Errno(\1)/' -echo ')' - -echo -echo '// Signals' -echo 'const (' -cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= Signal(\1)/' -echo ')' - -# Run C program to print error and syscall strings. -( - echo -E " -#include <stdio.h> -#include <stdlib.h> -#include <errno.h> -#include <ctype.h> -#include <string.h> -#include <signal.h> - -#define nelem(x) (sizeof(x)/sizeof((x)[0])) - -enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below - -int errors[] = { -" - for i in $errors - do - echo -E ' '$i, - done - - echo -E " -}; - -int signals[] = { -" - for i in $signals - do - echo -E ' '$i, - done - - # Use -E because on some systems bash builtin interprets \n itself. - echo -E ' -}; - -static int -intcmp(const void *a, const void *b) -{ - return *(int*)a - *(int*)b; -} - -int -main(void) -{ - int i, j, e; - char buf[1024], *p; - - printf("\n\n// Error table\n"); - printf("var errors = [...]string {\n"); - qsort(errors, nelem(errors), sizeof errors[0], intcmp); - for(i=0; i<nelem(errors); i++) { - e = errors[i]; - if(i > 0 && errors[i-1] == e) - continue; - strcpy(buf, strerror(e)); - // lowercase first letter: Bad -> bad, but STREAM -> STREAM. - if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) - buf[0] += a - A; - printf("\t%d: \"%s\",\n", e, buf); - } - printf("}\n\n"); - - printf("\n\n// Signal table\n"); - printf("var signals = [...]string {\n"); - qsort(signals, nelem(signals), sizeof signals[0], intcmp); - for(i=0; i<nelem(signals); i++) { - e = signals[i]; - if(i > 0 && signals[i-1] == e) - continue; - strcpy(buf, strsignal(e)); - // lowercase first letter: Bad -> bad, but STREAM -> STREAM. - if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) - buf[0] += a - A; - // cut trailing : number. 
- p = strrchr(buf, ":"[0]); - if(p) - *p = '\0'; - printf("\t%d: \"%s\",\n", e, buf); - } - printf("}\n\n"); - - return 0; -} - -' -) >_errors.c - -$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out diff --git a/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh b/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh deleted file mode 100644 index 3c3ab058..00000000 --- a/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -COMMAND="mksysnum_plan9.sh $@" - -cat <<EOF -// $COMMAND -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT - -package plan9 - -const( -EOF - -SP='[ ]' # space or tab -sed "s/^#define${SP}\\([A-Z0-9_][A-Z0-9_]*\\)${SP}${SP}*\\([0-9][0-9]*\\)/SYS_\\1=\\2/g" \ - < $1 | grep -v SYS__ - -cat <<EOF -) -EOF diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go deleted file mode 100644 index c9b69937..00000000 --- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.5 -// +build go1.5 - -package plan9 - -import "syscall" - -func fixwd() { - syscall.Fixwd() -} - -func Getwd() (wd string, err error) { - return syscall.Getwd() -} - -func Chdir(path string) error { - return syscall.Chdir(path) -} diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go deleted file mode 100644 index 98bf56b7..00000000 --- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.5 -// +build !go1.5 - -package plan9 - -func fixwd() { -} - -func Getwd() (wd string, err error) { - fd, err := open(".", O_RDONLY) - if err != nil { - return "", err - } - defer Close(fd) - return Fd2path(fd) -} - -func Chdir(path string) error { - return chdir(path) -} diff --git a/vendor/golang.org/x/sys/plan9/race.go b/vendor/golang.org/x/sys/plan9/race.go deleted file mode 100644 index 62377d2f..00000000 --- a/vendor/golang.org/x/sys/plan9/race.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build plan9 && race -// +build plan9,race - -package plan9 - -import ( - "runtime" - "unsafe" -) - -const raceenabled = true - -func raceAcquire(addr unsafe.Pointer) { - runtime.RaceAcquire(addr) -} - -func raceReleaseMerge(addr unsafe.Pointer) { - runtime.RaceReleaseMerge(addr) -} - -func raceReadRange(addr unsafe.Pointer, len int) { - runtime.RaceReadRange(addr, len) -} - -func raceWriteRange(addr unsafe.Pointer, len int) { - runtime.RaceWriteRange(addr, len) -} diff --git a/vendor/golang.org/x/sys/plan9/race0.go b/vendor/golang.org/x/sys/plan9/race0.go deleted file mode 100644 index f8da3087..00000000 --- a/vendor/golang.org/x/sys/plan9/race0.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build plan9 && !race -// +build plan9,!race - -package plan9 - -import ( - "unsafe" -) - -const raceenabled = false - -func raceAcquire(addr unsafe.Pointer) { -} - -func raceReleaseMerge(addr unsafe.Pointer) { -} - -func raceReadRange(addr unsafe.Pointer, len int) { -} - -func raceWriteRange(addr unsafe.Pointer, len int) { -} diff --git a/vendor/golang.org/x/sys/plan9/str.go b/vendor/golang.org/x/sys/plan9/str.go deleted file mode 100644 index 55fa8d02..00000000 --- a/vendor/golang.org/x/sys/plan9/str.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build plan9 -// +build plan9 - -package plan9 - -func itoa(val int) string { // do it here rather than with fmt to avoid dependency - if val < 0 { - return "-" + itoa(-val) - } - var buf [32]byte // big enough for int64 - i := len(buf) - 1 - for val >= 10 { - buf[i] = byte(val%10 + '0') - i-- - val /= 10 - } - buf[i] = byte(val + '0') - return string(buf[i:]) -} diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go deleted file mode 100644 index 67e5b011..00000000 --- a/vendor/golang.org/x/sys/plan9/syscall.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build plan9 -// +build plan9 - -// Package plan9 contains an interface to the low-level operating system -// primitives. OS details vary depending on the underlying system, and -// by default, godoc will display the OS-specific documentation for the current -// system. If you want godoc to display documentation for another -// system, set $GOOS and $GOARCH to the desired system. For example, if -// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS -// to freebsd and $GOARCH to arm. 
-// -// The primary use of this package is inside other packages that provide a more -// portable interface to the system, such as "os", "time" and "net". Use -// those packages rather than this one if you can. -// -// For details of the functions and data types in this package consult -// the manuals for the appropriate operating system. -// -// These calls return err == nil to indicate success; otherwise -// err represents an operating system error describing the failure and -// holds a value of type syscall.ErrorString. -package plan9 // import "golang.org/x/sys/plan9" - -import ( - "bytes" - "strings" - "unsafe" -) - -// ByteSliceFromString returns a NUL-terminated slice of bytes -// containing the text of s. If s contains a NUL byte at any -// location, it returns (nil, EINVAL). -func ByteSliceFromString(s string) ([]byte, error) { - if strings.IndexByte(s, 0) != -1 { - return nil, EINVAL - } - a := make([]byte, len(s)+1) - copy(a, s) - return a, nil -} - -// BytePtrFromString returns a pointer to a NUL-terminated array of -// bytes containing the text of s. If s contains a NUL byte at any -// location, it returns (nil, EINVAL). -func BytePtrFromString(s string) (*byte, error) { - a, err := ByteSliceFromString(s) - if err != nil { - return nil, err - } - return &a[0], nil -} - -// ByteSliceToString returns a string form of the text represented by the slice s, with a terminating NUL and any -// bytes after the NUL removed. -func ByteSliceToString(s []byte) string { - if i := bytes.IndexByte(s, 0); i != -1 { - s = s[:i] - } - return string(s) -} - -// BytePtrToString takes a pointer to a sequence of text and returns the corresponding string. -// If the pointer is nil, it returns the empty string. It assumes that the text sequence is terminated -// at a zero byte; if the zero byte is not present, the program may crash. -func BytePtrToString(p *byte) string { - if p == nil { - return "" - } - if *p == 0 { - return "" - } - - // Find NUL terminator. 
- n := 0 - for ptr := unsafe.Pointer(p); *(*byte)(ptr) != 0; n++ { - ptr = unsafe.Pointer(uintptr(ptr) + 1) - } - - return string(unsafe.Slice(p, n)) -} - -// Single-word zero for use when we need a valid pointer to 0 bytes. -// See mksyscall.pl. -var _zero uintptr - -func (ts *Timespec) Unix() (sec int64, nsec int64) { - return int64(ts.Sec), int64(ts.Nsec) -} - -func (tv *Timeval) Unix() (sec int64, nsec int64) { - return int64(tv.Sec), int64(tv.Usec) * 1000 -} - -func (ts *Timespec) Nano() int64 { - return int64(ts.Sec)*1e9 + int64(ts.Nsec) -} - -func (tv *Timeval) Nano() int64 { - return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 -} - -// use is a no-op, but the compiler cannot see that it is. -// Calling use(p) ensures that p is kept live until that point. -// -//go:noescape -func use(p unsafe.Pointer) diff --git a/vendor/golang.org/x/sys/plan9/syscall_plan9.go b/vendor/golang.org/x/sys/plan9/syscall_plan9.go deleted file mode 100644 index d079d811..00000000 --- a/vendor/golang.org/x/sys/plan9/syscall_plan9.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Plan 9 system calls. -// This file is compiled as ordinary Go code, -// but it is also input to mksyscall, -// which parses the //sys lines and generates system call stubs. -// Note that sometimes we use a lowercase //sys name and -// wrap it in our own nicer implementation. - -package plan9 - -import ( - "bytes" - "syscall" - "unsafe" -) - -// A Note is a string describing a process note. -// It implements the os.Signal interface. -type Note string - -func (n Note) Signal() {} - -func (n Note) String() string { - return string(n) -} - -var ( - Stdin = 0 - Stdout = 1 - Stderr = 2 -) - -// For testing: clients can set this flag to force -// creation of IPv6 sockets to return EAFNOSUPPORT. 
-var SocketDisableIPv6 bool - -func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.ErrorString) -func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.ErrorString) -func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) -func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) - -func atoi(b []byte) (n uint) { - n = 0 - for i := 0; i < len(b); i++ { - n = n*10 + uint(b[i]-'0') - } - return -} - -func cstring(s []byte) string { - i := bytes.IndexByte(s, 0) - if i == -1 { - i = len(s) - } - return string(s[:i]) -} - -func errstr() string { - var buf [ERRMAX]byte - - RawSyscall(SYS_ERRSTR, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)), 0) - - buf[len(buf)-1] = 0 - return cstring(buf[:]) -} - -// Implemented in assembly to import from runtime. -func exit(code int) - -func Exit(code int) { exit(code) } - -func readnum(path string) (uint, error) { - var b [12]byte - - fd, e := Open(path, O_RDONLY) - if e != nil { - return 0, e - } - defer Close(fd) - - n, e := Pread(fd, b[:], 0) - - if e != nil { - return 0, e - } - - m := 0 - for ; m < n && b[m] == ' '; m++ { - } - - return atoi(b[m : n-1]), nil -} - -func Getpid() (pid int) { - n, _ := readnum("#c/pid") - return int(n) -} - -func Getppid() (ppid int) { - n, _ := readnum("#c/ppid") - return int(n) -} - -func Read(fd int, p []byte) (n int, err error) { - return Pread(fd, p, -1) -} - -func Write(fd int, p []byte) (n int, err error) { - return Pwrite(fd, p, -1) -} - -var ioSync int64 - -//sys fd2path(fd int, buf []byte) (err error) - -func Fd2path(fd int) (path string, err error) { - var buf [512]byte - - e := fd2path(fd, buf[:]) - if e != nil { - return "", e - } - return cstring(buf[:]), nil -} - -//sys pipe(p *[2]int32) (err error) - -func Pipe(p []int) (err error) { - if len(p) != 2 { - return syscall.ErrorString("bad arg in system call") - } - var pp [2]int32 - err = pipe(&pp) - if err == nil { - p[0] = int(pp[0]) - p[1] = int(pp[1]) - } - 
return -} - -// Underlying system call writes to newoffset via pointer. -// Implemented in assembly to avoid allocation. -func seek(placeholder uintptr, fd int, offset int64, whence int) (newoffset int64, err string) - -func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - newoffset, e := seek(0, fd, offset, whence) - - if newoffset == -1 { - err = syscall.ErrorString(e) - } - return -} - -func Mkdir(path string, mode uint32) (err error) { - fd, err := Create(path, O_RDONLY, DMDIR|mode) - - if fd != -1 { - Close(fd) - } - - return -} - -type Waitmsg struct { - Pid int - Time [3]uint32 - Msg string -} - -func (w Waitmsg) Exited() bool { return true } -func (w Waitmsg) Signaled() bool { return false } - -func (w Waitmsg) ExitStatus() int { - if len(w.Msg) == 0 { - // a normal exit returns no message - return 0 - } - return 1 -} - -//sys await(s []byte) (n int, err error) - -func Await(w *Waitmsg) (err error) { - var buf [512]byte - var f [5][]byte - - n, err := await(buf[:]) - - if err != nil || w == nil { - return - } - - nf := 0 - p := 0 - for i := 0; i < n && nf < len(f)-1; i++ { - if buf[i] == ' ' { - f[nf] = buf[p:i] - p = i + 1 - nf++ - } - } - f[nf] = buf[p:] - nf++ - - if nf != len(f) { - return syscall.ErrorString("invalid wait message") - } - w.Pid = int(atoi(f[0])) - w.Time[0] = uint32(atoi(f[1])) - w.Time[1] = uint32(atoi(f[2])) - w.Time[2] = uint32(atoi(f[3])) - w.Msg = cstring(f[4]) - if w.Msg == "''" { - // await() returns '' for no error - w.Msg = "" - } - return -} - -func Unmount(name, old string) (err error) { - fixwd() - oldp, err := BytePtrFromString(old) - if err != nil { - return err - } - oldptr := uintptr(unsafe.Pointer(oldp)) - - var r0 uintptr - var e syscall.ErrorString - - // bind(2) man page: If name is zero, everything bound or mounted upon old is unbound or unmounted. 
- if name == "" { - r0, _, e = Syscall(SYS_UNMOUNT, _zero, oldptr, 0) - } else { - namep, err := BytePtrFromString(name) - if err != nil { - return err - } - r0, _, e = Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(namep)), oldptr, 0) - } - - if int32(r0) == -1 { - err = e - } - return -} - -func Fchdir(fd int) (err error) { - path, err := Fd2path(fd) - - if err != nil { - return - } - - return Chdir(path) -} - -type Timespec struct { - Sec int32 - Nsec int32 -} - -type Timeval struct { - Sec int32 - Usec int32 -} - -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int32(nsec / 1e9) - return -} - -func nsec() int64 { - var scratch int64 - - r0, _, _ := Syscall(SYS_NSEC, uintptr(unsafe.Pointer(&scratch)), 0, 0) - // TODO(aram): remove hack after I fix _nsec in the pc64 kernel. - if r0 == 0 { - return scratch - } - return int64(r0) -} - -func Gettimeofday(tv *Timeval) error { - nsec := nsec() - *tv = NsecToTimeval(nsec) - return nil -} - -func Getpagesize() int { return 0x1000 } - -func Getegid() (egid int) { return -1 } -func Geteuid() (euid int) { return -1 } -func Getgid() (gid int) { return -1 } -func Getuid() (uid int) { return -1 } - -func Getgroups() (gids []int, err error) { - return make([]int, 0), nil -} - -//sys open(path string, mode int) (fd int, err error) - -func Open(path string, mode int) (fd int, err error) { - fixwd() - return open(path, mode) -} - -//sys create(path string, mode int, perm uint32) (fd int, err error) - -func Create(path string, mode int, perm uint32) (fd int, err error) { - fixwd() - return create(path, mode, perm) -} - -//sys remove(path string) (err error) - -func Remove(path string) error { - fixwd() - return remove(path) -} - -//sys stat(path string, edir []byte) (n int, err error) - -func Stat(path string, edir []byte) (n int, err error) { - fixwd() - return stat(path, edir) -} - -//sys bind(name string, old string, flag int) (err error) - -func 
Bind(name string, old string, flag int) (err error) { - fixwd() - return bind(name, old, flag) -} - -//sys mount(fd int, afd int, old string, flag int, aname string) (err error) - -func Mount(fd int, afd int, old string, flag int, aname string) (err error) { - fixwd() - return mount(fd, afd, old, flag, aname) -} - -//sys wstat(path string, edir []byte) (err error) - -func Wstat(path string, edir []byte) (err error) { - fixwd() - return wstat(path, edir) -} - -//sys chdir(path string) (err error) -//sys Dup(oldfd int, newfd int) (fd int, err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) -//sys Close(fd int) (err error) -//sys Fstat(fd int, edir []byte) (n int, err error) -//sys Fwstat(fd int, edir []byte) (err error) diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go deleted file mode 100644 index 3f40b9bd..00000000 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go +++ /dev/null @@ -1,285 +0,0 @@ -// go run mksyscall.go -l32 -plan9 -tags plan9,386 syscall_plan9.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -//go:build plan9 && 386 -// +build plan9,386 - -package plan9 - -import "unsafe" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fd2path(fd int, buf []byte) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]int32) (err error) { - r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func await(s []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(s) > 0 { - _p0 = unsafe.Pointer(&s[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func open(path string, mode int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - fd = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func create(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func remove(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := 
Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func stat(path string, edir []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(edir) > 0 { - _p1 = unsafe.Pointer(&edir[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(name string, old string, flag int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(old) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(fd int, afd int, old string, flag int, aname string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(old) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(aname) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wstat(path string, edir []byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(edir) > 0 { - _p1 = unsafe.Pointer(&edir[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) - if 
int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int, newfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0) - fd = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, edir []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(edir) > 0 { - _p0 = unsafe.Pointer(&edir[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), 
uintptr(_p0), uintptr(len(edir))) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fwstat(fd int, edir []byte) (err error) { - var _p0 unsafe.Pointer - if len(edir) > 0 { - _p0 = unsafe.Pointer(&edir[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir))) - if int32(r0) == -1 { - err = e1 - } - return -} diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go deleted file mode 100644 index 0e6a96aa..00000000 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go +++ /dev/null @@ -1,285 +0,0 @@ -// go run mksyscall.go -l32 -plan9 -tags plan9,amd64 syscall_plan9.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -//go:build plan9 && amd64 -// +build plan9,amd64 - -package plan9 - -import "unsafe" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fd2path(fd int, buf []byte) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]int32) (err error) { - r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func await(s []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(s) > 0 { - _p0 = unsafe.Pointer(&s[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - 
-func open(path string, mode int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - fd = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func create(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func remove(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func stat(path string, edir []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(edir) > 0 { - _p1 = unsafe.Pointer(&edir[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(name string, old string, flag int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(old) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT - -func mount(fd int, afd int, old string, flag int, aname string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(old) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(aname) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wstat(path string, edir []byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(edir) > 0 { - _p1 = unsafe.Pointer(&edir[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int, newfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0) - fd = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, edir []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(edir) > 0 { - _p0 = unsafe.Pointer(&edir[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir))) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fwstat(fd int, edir []byte) (err error) { - var _p0 unsafe.Pointer - if len(edir) > 0 { - _p0 = unsafe.Pointer(&edir[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir))) - if int32(r0) == -1 { - err = e1 - } - return -} diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go deleted file mode 100644 index 244c501b..00000000 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go +++ /dev/null @@ -1,285 +0,0 @@ -// go run mksyscall.go -l32 -plan9 -tags plan9,arm syscall_plan9.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -//go:build plan9 && arm -// +build plan9,arm - -package plan9 - -import "unsafe" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fd2path(fd int, buf []byte) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]int32) (err error) { - r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func await(s []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(s) > 0 { - _p0 = unsafe.Pointer(&s[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func open(path string, mode int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - fd = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func create(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func remove(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := 
Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func stat(path string, edir []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(edir) > 0 { - _p1 = unsafe.Pointer(&edir[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(name string, old string, flag int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(old) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(fd int, afd int, old string, flag int, aname string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(old) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(aname) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wstat(path string, edir []byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(edir) > 0 { - _p1 = unsafe.Pointer(&edir[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) - if 
int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int, newfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0) - fd = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, edir []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(edir) > 0 { - _p0 = unsafe.Pointer(&edir[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), 
uintptr(_p0), uintptr(len(edir))) - n = int(r0) - if int32(r0) == -1 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fwstat(fd int, edir []byte) (err error) { - var _p0 unsafe.Pointer - if len(edir) > 0 { - _p0 = unsafe.Pointer(&edir[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir))) - if int32(r0) == -1 { - err = e1 - } - return -} diff --git a/vendor/golang.org/x/sys/plan9/zsysnum_plan9.go b/vendor/golang.org/x/sys/plan9/zsysnum_plan9.go deleted file mode 100644 index 22e8abd4..00000000 --- a/vendor/golang.org/x/sys/plan9/zsysnum_plan9.go +++ /dev/null @@ -1,49 +0,0 @@ -// mksysnum_plan9.sh /opt/plan9/sys/src/libc/9syscall/sys.h -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT - -package plan9 - -const ( - SYS_SYSR1 = 0 - SYS_BIND = 2 - SYS_CHDIR = 3 - SYS_CLOSE = 4 - SYS_DUP = 5 - SYS_ALARM = 6 - SYS_EXEC = 7 - SYS_EXITS = 8 - SYS_FAUTH = 10 - SYS_SEGBRK = 12 - SYS_OPEN = 14 - SYS_OSEEK = 16 - SYS_SLEEP = 17 - SYS_RFORK = 19 - SYS_PIPE = 21 - SYS_CREATE = 22 - SYS_FD2PATH = 23 - SYS_BRK_ = 24 - SYS_REMOVE = 25 - SYS_NOTIFY = 28 - SYS_NOTED = 29 - SYS_SEGATTACH = 30 - SYS_SEGDETACH = 31 - SYS_SEGFREE = 32 - SYS_SEGFLUSH = 33 - SYS_RENDEZVOUS = 34 - SYS_UNMOUNT = 35 - SYS_SEMACQUIRE = 37 - SYS_SEMRELEASE = 38 - SYS_SEEK = 39 - SYS_FVERSION = 40 - SYS_ERRSTR = 41 - SYS_STAT = 42 - SYS_FSTAT = 43 - SYS_WSTAT = 44 - SYS_FWSTAT = 45 - SYS_MOUNT = 46 - SYS_AWAIT = 47 - SYS_PREAD = 50 - SYS_PWRITE = 51 - SYS_TSEMACQUIRE = 52 - SYS_NSEC = 53 -) diff --git a/vendor/golang.org/x/term/CONTRIBUTING.md b/vendor/golang.org/x/term/CONTRIBUTING.md deleted file mode 100644 index d0485e88..00000000 --- a/vendor/golang.org/x/term/CONTRIBUTING.md +++ /dev/null @@ -1,26 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! 
- -## Filing issues - -When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/term/LICENSE b/vendor/golang.org/x/term/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/term/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/term/PATENTS b/vendor/golang.org/x/term/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/term/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/term/README.md b/vendor/golang.org/x/term/README.md deleted file mode 100644 index d03d0aef..00000000 --- a/vendor/golang.org/x/term/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Go terminal/console support - -[![Go Reference](https://pkg.go.dev/badge/golang.org/x/term.svg)](https://pkg.go.dev/golang.org/x/term) - -This repository provides Go terminal and console support packages. - -## Download/Install - -The easiest way to install is to run `go get -u golang.org/x/term`. You can -also manually git clone the repository to `$GOPATH/src/golang.org/x/term`. - -## Report Issues / Send Patches - -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - -The main issue tracker for the term repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/term:" in the -subject line, so it is easy to find. diff --git a/vendor/golang.org/x/term/codereview.cfg b/vendor/golang.org/x/term/codereview.cfg deleted file mode 100644 index 3f8b14b6..00000000 --- a/vendor/golang.org/x/term/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/golang.org/x/term/term.go b/vendor/golang.org/x/term/term.go deleted file mode 100644 index 1a40d101..00000000 --- a/vendor/golang.org/x/term/term.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package term provides support functions for dealing with terminals, as -// commonly found on UNIX systems. -// -// Putting a terminal into raw mode is the most common requirement: -// -// oldState, err := term.MakeRaw(int(os.Stdin.Fd())) -// if err != nil { -// panic(err) -// } -// defer term.Restore(int(os.Stdin.Fd()), oldState) -// -// Note that on non-Unix systems os.Stdin.Fd() may not be 0. -package term - -// State contains the state of a terminal. -type State struct { - state -} - -// IsTerminal returns whether the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - return isTerminal(fd) -} - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd int) (*State, error) { - return makeRaw(fd) -} - -// GetState returns the current state of a terminal which may be useful to -// restore the terminal after a signal. -func GetState(fd int) (*State, error) { - return getState(fd) -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. -func Restore(fd int, oldState *State) error { - return restore(fd, oldState) -} - -// GetSize returns the visible dimensions of the given terminal. -// -// These dimensions don't include any scrollback buffer height. -func GetSize(fd int) (width, height int, err error) { - return getSize(fd) -} - -// ReadPassword reads a line of input from a terminal without local echo. This -// is commonly used for inputting passwords and other sensitive data. The slice -// returned does not include the \n. 
-func ReadPassword(fd int) ([]byte, error) { - return readPassword(fd) -} diff --git a/vendor/golang.org/x/term/term_plan9.go b/vendor/golang.org/x/term/term_plan9.go deleted file mode 100644 index 21afa55c..00000000 --- a/vendor/golang.org/x/term/term_plan9.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package term - -import ( - "fmt" - "runtime" - - "golang.org/x/sys/plan9" -) - -type state struct{} - -func isTerminal(fd int) bool { - path, err := plan9.Fd2path(fd) - if err != nil { - return false - } - return path == "/dev/cons" || path == "/mnt/term/dev/cons" -} - -func makeRaw(fd int) (*State, error) { - return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} - -func getState(fd int) (*State, error) { - return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} - -func restore(fd int, state *State) error { - return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} - -func getSize(fd int) (width, height int, err error) { - return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} - -func readPassword(fd int) ([]byte, error) { - return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} diff --git a/vendor/golang.org/x/term/term_unix.go b/vendor/golang.org/x/term/term_unix.go deleted file mode 100644 index 62c2b3f4..00000000 --- a/vendor/golang.org/x/term/term_unix.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos - -package term - -import ( - "golang.org/x/sys/unix" -) - -type state struct { - termios unix.Termios -} - -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - return err == nil -} - -func makeRaw(fd int) (*State, error) { - termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - if err != nil { - return nil, err - } - - oldState := State{state{termios: *termios}} - - // This attempts to replicate the behaviour documented for cfmakeraw in - // the termios(3) manpage. - termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON - termios.Oflag &^= unix.OPOST - termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN - termios.Cflag &^= unix.CSIZE | unix.PARENB - termios.Cflag |= unix.CS8 - termios.Cc[unix.VMIN] = 1 - termios.Cc[unix.VTIME] = 0 - if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil { - return nil, err - } - - return &oldState, nil -} - -func getState(fd int) (*State, error) { - termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - if err != nil { - return nil, err - } - - return &State{state{termios: *termios}}, nil -} - -func restore(fd int, state *State) error { - return unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.termios) -} - -func getSize(fd int) (width, height int, err error) { - ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) - if err != nil { - return 0, 0, err - } - return int(ws.Col), int(ws.Row), nil -} - -// passwordReader is an io.Reader that reads from a specific file descriptor. 
-type passwordReader int - -func (r passwordReader) Read(buf []byte) (int, error) { - return unix.Read(int(r), buf) -} - -func readPassword(fd int) ([]byte, error) { - termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - if err != nil { - return nil, err - } - - newState := *termios - newState.Lflag &^= unix.ECHO - newState.Lflag |= unix.ICANON | unix.ISIG - newState.Iflag |= unix.ICRNL - if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newState); err != nil { - return nil, err - } - - defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios) - - return readPasswordLine(passwordReader(fd)) -} diff --git a/vendor/golang.org/x/term/term_unix_bsd.go b/vendor/golang.org/x/term/term_unix_bsd.go deleted file mode 100644 index 853b3d69..00000000 --- a/vendor/golang.org/x/term/term_unix_bsd.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd - -package term - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TIOCGETA -const ioctlWriteTermios = unix.TIOCSETA diff --git a/vendor/golang.org/x/term/term_unix_other.go b/vendor/golang.org/x/term/term_unix_other.go deleted file mode 100644 index 1e8955c9..00000000 --- a/vendor/golang.org/x/term/term_unix_other.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build aix || linux || solaris || zos -// +build aix linux solaris zos - -package term - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS -const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/golang.org/x/term/term_unsupported.go b/vendor/golang.org/x/term/term_unsupported.go deleted file mode 100644 index f1df8506..00000000 --- a/vendor/golang.org/x/term/term_unsupported.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !zos && !windows && !solaris && !plan9 -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!zos,!windows,!solaris,!plan9 - -package term - -import ( - "fmt" - "runtime" -) - -type state struct{} - -func isTerminal(fd int) bool { - return false -} - -func makeRaw(fd int) (*State, error) { - return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} - -func getState(fd int) (*State, error) { - return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} - -func restore(fd int, state *State) error { - return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} - -func getSize(fd int) (width, height int, err error) { - return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} - -func readPassword(fd int) ([]byte, error) { - return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go deleted file mode 100644 index 465f5606..00000000 --- a/vendor/golang.org/x/term/term_windows.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package term - -import ( - "os" - - "golang.org/x/sys/windows" -) - -type state struct { - mode uint32 -} - -func isTerminal(fd int) bool { - var st uint32 - err := windows.GetConsoleMode(windows.Handle(fd), &st) - return err == nil -} - -func makeRaw(fd int) (*State, error) { - var st uint32 - if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { - return nil, err - } - raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) - if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { - return nil, err - } - return &State{state{st}}, nil -} - -func getState(fd int) (*State, error) { - var st uint32 - if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { - return nil, err - } - return &State{state{st}}, nil -} - -func restore(fd int, state *State) error { - return windows.SetConsoleMode(windows.Handle(fd), state.mode) -} - -func getSize(fd int) (width, height int, err error) { - var info windows.ConsoleScreenBufferInfo - if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { - return 0, 0, err - } - return int(info.Window.Right - info.Window.Left + 1), int(info.Window.Bottom - info.Window.Top + 1), nil -} - -func readPassword(fd int) ([]byte, error) { - var st uint32 - if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { - return nil, err - } - old := st - - st &^= (windows.ENABLE_ECHO_INPUT | windows.ENABLE_LINE_INPUT) - st |= (windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_PROCESSED_INPUT) - if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil { - return nil, err - } - - defer windows.SetConsoleMode(windows.Handle(fd), old) - - var h windows.Handle - p, _ := windows.GetCurrentProcess() - if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, 
windows.DUPLICATE_SAME_ACCESS); err != nil { - return nil, err - } - - f := os.NewFile(uintptr(h), "stdin") - defer f.Close() - return readPasswordLine(f) -} diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go deleted file mode 100644 index f636667f..00000000 --- a/vendor/golang.org/x/term/terminal.go +++ /dev/null @@ -1,986 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package term - -import ( - "bytes" - "io" - "runtime" - "strconv" - "sync" - "unicode/utf8" -) - -// EscapeCodes contains escape sequences that can be written to the terminal in -// order to achieve different styles of text. -type EscapeCodes struct { - // Foreground colors - Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte - - // Reset all attributes - Reset []byte -} - -var vt100EscapeCodes = EscapeCodes{ - Black: []byte{keyEscape, '[', '3', '0', 'm'}, - Red: []byte{keyEscape, '[', '3', '1', 'm'}, - Green: []byte{keyEscape, '[', '3', '2', 'm'}, - Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, - Blue: []byte{keyEscape, '[', '3', '4', 'm'}, - Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, - Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, - White: []byte{keyEscape, '[', '3', '7', 'm'}, - - Reset: []byte{keyEscape, '[', '0', 'm'}, -} - -// Terminal contains the state for running a VT100 terminal that is capable of -// reading lines of input. -type Terminal struct { - // AutoCompleteCallback, if non-null, is called for each keypress with - // the full input line and the current position of the cursor (in - // bytes, as an index into |line|). If it returns ok=false, the key - // press is processed normally. Otherwise it returns a replacement line - // and the new cursor position. 
- AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) - - // Escape contains a pointer to the escape codes for this terminal. - // It's always a valid pointer, although the escape codes themselves - // may be empty if the terminal doesn't support them. - Escape *EscapeCodes - - // lock protects the terminal and the state in this object from - // concurrent processing of a key press and a Write() call. - lock sync.Mutex - - c io.ReadWriter - prompt []rune - - // line is the current line being entered. - line []rune - // pos is the logical position of the cursor in line - pos int - // echo is true if local echo is enabled - echo bool - // pasteActive is true iff there is a bracketed paste operation in - // progress. - pasteActive bool - - // cursorX contains the current X value of the cursor where the left - // edge is 0. cursorY contains the row number where the first row of - // the current line is 0. - cursorX, cursorY int - // maxLine is the greatest value of cursorY so far. - maxLine int - - termWidth, termHeight int - - // outBuf contains the terminal data to be sent. - outBuf []byte - // remainder contains the remainder of any partial key sequences after - // a read. It aliases into inBuf. - remainder []byte - inBuf [256]byte - - // history contains previously entered commands so that they can be - // accessed with the up and down keys. - history stRingBuffer - // historyIndex stores the currently accessed history entry, where zero - // means the immediately previous entry. - historyIndex int - // When navigating up and down the history it's possible to return to - // the incomplete, initial line. That value is stored in - // historyPending. - historyPending string -} - -// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is -// a local terminal, that terminal must first have been put into raw mode. -// prompt is a string that is written at the start of each input line (i.e. -// "> "). 
-func NewTerminal(c io.ReadWriter, prompt string) *Terminal { - return &Terminal{ - Escape: &vt100EscapeCodes, - c: c, - prompt: []rune(prompt), - termWidth: 80, - termHeight: 24, - echo: true, - historyIndex: -1, - } -} - -const ( - keyCtrlC = 3 - keyCtrlD = 4 - keyCtrlU = 21 - keyEnter = '\r' - keyEscape = 27 - keyBackspace = 127 - keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota - keyUp - keyDown - keyLeft - keyRight - keyAltLeft - keyAltRight - keyHome - keyEnd - keyDeleteWord - keyDeleteLine - keyClearScreen - keyPasteStart - keyPasteEnd -) - -var ( - crlf = []byte{'\r', '\n'} - pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} - pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} -) - -// bytesToKey tries to parse a key sequence from b. If successful, it returns -// the key and the remainder of the input. Otherwise it returns utf8.RuneError. -func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { - if len(b) == 0 { - return utf8.RuneError, nil - } - - if !pasteActive { - switch b[0] { - case 1: // ^A - return keyHome, b[1:] - case 2: // ^B - return keyLeft, b[1:] - case 5: // ^E - return keyEnd, b[1:] - case 6: // ^F - return keyRight, b[1:] - case 8: // ^H - return keyBackspace, b[1:] - case 11: // ^K - return keyDeleteLine, b[1:] - case 12: // ^L - return keyClearScreen, b[1:] - case 23: // ^W - return keyDeleteWord, b[1:] - case 14: // ^N - return keyDown, b[1:] - case 16: // ^P - return keyUp, b[1:] - } - } - - if b[0] != keyEscape { - if !utf8.FullRune(b) { - return utf8.RuneError, b - } - r, l := utf8.DecodeRune(b) - return r, b[l:] - } - - if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { - switch b[2] { - case 'A': - return keyUp, b[3:] - case 'B': - return keyDown, b[3:] - case 'C': - return keyRight, b[3:] - case 'D': - return keyLeft, b[3:] - case 'H': - return keyHome, b[3:] - case 'F': - return keyEnd, b[3:] - } - } - - if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] 
== ';' && b[4] == '3' { - switch b[5] { - case 'C': - return keyAltRight, b[6:] - case 'D': - return keyAltLeft, b[6:] - } - } - - if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { - return keyPasteStart, b[6:] - } - - if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { - return keyPasteEnd, b[6:] - } - - // If we get here then we have a key that we don't recognise, or a - // partial sequence. It's not clear how one should find the end of a - // sequence without knowing them all, but it seems that [a-zA-Z~] only - // appears at the end of a sequence. - for i, c := range b[0:] { - if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { - return keyUnknown, b[i+1:] - } - } - - return utf8.RuneError, b -} - -// queue appends data to the end of t.outBuf -func (t *Terminal) queue(data []rune) { - t.outBuf = append(t.outBuf, []byte(string(data))...) -} - -var space = []rune{' '} - -func isPrintable(key rune) bool { - isInSurrogateArea := key >= 0xd800 && key <= 0xdbff - return key >= 32 && !isInSurrogateArea -} - -// moveCursorToPos appends data to t.outBuf which will move the cursor to the -// given, logical position in the text. -func (t *Terminal) moveCursorToPos(pos int) { - if !t.echo { - return - } - - x := visualLength(t.prompt) + pos - y := x / t.termWidth - x = x % t.termWidth - - up := 0 - if y < t.cursorY { - up = t.cursorY - y - } - - down := 0 - if y > t.cursorY { - down = y - t.cursorY - } - - left := 0 - if x < t.cursorX { - left = t.cursorX - x - } - - right := 0 - if x > t.cursorX { - right = x - t.cursorX - } - - t.cursorX = x - t.cursorY = y - t.move(up, down, left, right) -} - -func (t *Terminal) move(up, down, left, right int) { - m := []rune{} - - // 1 unit up can be expressed as ^[[A or ^[A - // 5 units up can be expressed as ^[[5A - - if up == 1 { - m = append(m, keyEscape, '[', 'A') - } else if up > 1 { - m = append(m, keyEscape, '[') - m = append(m, []rune(strconv.Itoa(up))...) 
- m = append(m, 'A') - } - - if down == 1 { - m = append(m, keyEscape, '[', 'B') - } else if down > 1 { - m = append(m, keyEscape, '[') - m = append(m, []rune(strconv.Itoa(down))...) - m = append(m, 'B') - } - - if right == 1 { - m = append(m, keyEscape, '[', 'C') - } else if right > 1 { - m = append(m, keyEscape, '[') - m = append(m, []rune(strconv.Itoa(right))...) - m = append(m, 'C') - } - - if left == 1 { - m = append(m, keyEscape, '[', 'D') - } else if left > 1 { - m = append(m, keyEscape, '[') - m = append(m, []rune(strconv.Itoa(left))...) - m = append(m, 'D') - } - - t.queue(m) -} - -func (t *Terminal) clearLineToRight() { - op := []rune{keyEscape, '[', 'K'} - t.queue(op) -} - -const maxLineLength = 4096 - -func (t *Terminal) setLine(newLine []rune, newPos int) { - if t.echo { - t.moveCursorToPos(0) - t.writeLine(newLine) - for i := len(newLine); i < len(t.line); i++ { - t.writeLine(space) - } - t.moveCursorToPos(newPos) - } - t.line = newLine - t.pos = newPos -} - -func (t *Terminal) advanceCursor(places int) { - t.cursorX += places - t.cursorY += t.cursorX / t.termWidth - if t.cursorY > t.maxLine { - t.maxLine = t.cursorY - } - t.cursorX = t.cursorX % t.termWidth - - if places > 0 && t.cursorX == 0 { - // Normally terminals will advance the current position - // when writing a character. But that doesn't happen - // for the last character in a line. However, when - // writing a character (except a new line) that causes - // a line wrap, the position will be advanced two - // places. - // - // So, if we are stopping at the end of a line, we - // need to write a newline so that our cursor can be - // advanced to the next line. 
- t.outBuf = append(t.outBuf, '\r', '\n') - } -} - -func (t *Terminal) eraseNPreviousChars(n int) { - if n == 0 { - return - } - - if t.pos < n { - n = t.pos - } - t.pos -= n - t.moveCursorToPos(t.pos) - - copy(t.line[t.pos:], t.line[n+t.pos:]) - t.line = t.line[:len(t.line)-n] - if t.echo { - t.writeLine(t.line[t.pos:]) - for i := 0; i < n; i++ { - t.queue(space) - } - t.advanceCursor(n) - t.moveCursorToPos(t.pos) - } -} - -// countToLeftWord returns then number of characters from the cursor to the -// start of the previous word. -func (t *Terminal) countToLeftWord() int { - if t.pos == 0 { - return 0 - } - - pos := t.pos - 1 - for pos > 0 { - if t.line[pos] != ' ' { - break - } - pos-- - } - for pos > 0 { - if t.line[pos] == ' ' { - pos++ - break - } - pos-- - } - - return t.pos - pos -} - -// countToRightWord returns then number of characters from the cursor to the -// start of the next word. -func (t *Terminal) countToRightWord() int { - pos := t.pos - for pos < len(t.line) { - if t.line[pos] == ' ' { - break - } - pos++ - } - for pos < len(t.line) { - if t.line[pos] != ' ' { - break - } - pos++ - } - return pos - t.pos -} - -// visualLength returns the number of visible glyphs in s. -func visualLength(runes []rune) int { - inEscapeSeq := false - length := 0 - - for _, r := range runes { - switch { - case inEscapeSeq: - if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { - inEscapeSeq = false - } - case r == '\x1b': - inEscapeSeq = true - default: - length++ - } - } - - return length -} - -// handleKey processes the given key and, optionally, returns a line of text -// that the user has entered. -func (t *Terminal) handleKey(key rune) (line string, ok bool) { - if t.pasteActive && key != keyEnter { - t.addKeyToLine(key) - return - } - - switch key { - case keyBackspace: - if t.pos == 0 { - return - } - t.eraseNPreviousChars(1) - case keyAltLeft: - // move left by a word. 
- t.pos -= t.countToLeftWord() - t.moveCursorToPos(t.pos) - case keyAltRight: - // move right by a word. - t.pos += t.countToRightWord() - t.moveCursorToPos(t.pos) - case keyLeft: - if t.pos == 0 { - return - } - t.pos-- - t.moveCursorToPos(t.pos) - case keyRight: - if t.pos == len(t.line) { - return - } - t.pos++ - t.moveCursorToPos(t.pos) - case keyHome: - if t.pos == 0 { - return - } - t.pos = 0 - t.moveCursorToPos(t.pos) - case keyEnd: - if t.pos == len(t.line) { - return - } - t.pos = len(t.line) - t.moveCursorToPos(t.pos) - case keyUp: - entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) - if !ok { - return "", false - } - if t.historyIndex == -1 { - t.historyPending = string(t.line) - } - t.historyIndex++ - runes := []rune(entry) - t.setLine(runes, len(runes)) - case keyDown: - switch t.historyIndex { - case -1: - return - case 0: - runes := []rune(t.historyPending) - t.setLine(runes, len(runes)) - t.historyIndex-- - default: - entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) - if ok { - t.historyIndex-- - runes := []rune(entry) - t.setLine(runes, len(runes)) - } - } - case keyEnter: - t.moveCursorToPos(len(t.line)) - t.queue([]rune("\r\n")) - line = string(t.line) - ok = true - t.line = t.line[:0] - t.pos = 0 - t.cursorX = 0 - t.cursorY = 0 - t.maxLine = 0 - case keyDeleteWord: - // Delete zero or more spaces and then one or more characters. - t.eraseNPreviousChars(t.countToLeftWord()) - case keyDeleteLine: - // Delete everything from the current cursor position to the - // end of line. - for i := t.pos; i < len(t.line); i++ { - t.queue(space) - t.advanceCursor(1) - } - t.line = t.line[:t.pos] - t.moveCursorToPos(t.pos) - case keyCtrlD: - // Erase the character under the current position. - // The EOF case when the line is empty is handled in - // readLine(). 
- if t.pos < len(t.line) { - t.pos++ - t.eraseNPreviousChars(1) - } - case keyCtrlU: - t.eraseNPreviousChars(t.pos) - case keyClearScreen: - // Erases the screen and moves the cursor to the home position. - t.queue([]rune("\x1b[2J\x1b[H")) - t.queue(t.prompt) - t.cursorX, t.cursorY = 0, 0 - t.advanceCursor(visualLength(t.prompt)) - t.setLine(t.line, t.pos) - default: - if t.AutoCompleteCallback != nil { - prefix := string(t.line[:t.pos]) - suffix := string(t.line[t.pos:]) - - t.lock.Unlock() - newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) - t.lock.Lock() - - if completeOk { - t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) - return - } - } - if !isPrintable(key) { - return - } - if len(t.line) == maxLineLength { - return - } - t.addKeyToLine(key) - } - return -} - -// addKeyToLine inserts the given key at the current position in the current -// line. -func (t *Terminal) addKeyToLine(key rune) { - if len(t.line) == cap(t.line) { - newLine := make([]rune, len(t.line), 2*(1+len(t.line))) - copy(newLine, t.line) - t.line = newLine - } - t.line = t.line[:len(t.line)+1] - copy(t.line[t.pos+1:], t.line[t.pos:]) - t.line[t.pos] = key - if t.echo { - t.writeLine(t.line[t.pos:]) - } - t.pos++ - t.moveCursorToPos(t.pos) -} - -func (t *Terminal) writeLine(line []rune) { - for len(line) != 0 { - remainingOnLine := t.termWidth - t.cursorX - todo := len(line) - if todo > remainingOnLine { - todo = remainingOnLine - } - t.queue(line[:todo]) - t.advanceCursor(visualLength(line[:todo])) - line = line[todo:] - } -} - -// writeWithCRLF writes buf to w but replaces all occurrences of \n with \r\n. 
-func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) { - for len(buf) > 0 { - i := bytes.IndexByte(buf, '\n') - todo := len(buf) - if i >= 0 { - todo = i - } - - var nn int - nn, err = w.Write(buf[:todo]) - n += nn - if err != nil { - return n, err - } - buf = buf[todo:] - - if i >= 0 { - if _, err = w.Write(crlf); err != nil { - return n, err - } - n++ - buf = buf[1:] - } - } - - return n, nil -} - -func (t *Terminal) Write(buf []byte) (n int, err error) { - t.lock.Lock() - defer t.lock.Unlock() - - if t.cursorX == 0 && t.cursorY == 0 { - // This is the easy case: there's nothing on the screen that we - // have to move out of the way. - return writeWithCRLF(t.c, buf) - } - - // We have a prompt and possibly user input on the screen. We - // have to clear it first. - t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) - t.cursorX = 0 - t.clearLineToRight() - - for t.cursorY > 0 { - t.move(1 /* up */, 0, 0, 0) - t.cursorY-- - t.clearLineToRight() - } - - if _, err = t.c.Write(t.outBuf); err != nil { - return - } - t.outBuf = t.outBuf[:0] - - if n, err = writeWithCRLF(t.c, buf); err != nil { - return - } - - t.writeLine(t.prompt) - if t.echo { - t.writeLine(t.line) - } - - t.moveCursorToPos(t.pos) - - if _, err = t.c.Write(t.outBuf); err != nil { - return - } - t.outBuf = t.outBuf[:0] - return -} - -// ReadPassword temporarily changes the prompt and reads a password, without -// echo, from the terminal. -func (t *Terminal) ReadPassword(prompt string) (line string, err error) { - t.lock.Lock() - defer t.lock.Unlock() - - oldPrompt := t.prompt - t.prompt = []rune(prompt) - t.echo = false - - line, err = t.readLine() - - t.prompt = oldPrompt - t.echo = true - - return -} - -// ReadLine returns a line of input from the terminal. 
-func (t *Terminal) ReadLine() (line string, err error) { - t.lock.Lock() - defer t.lock.Unlock() - - return t.readLine() -} - -func (t *Terminal) readLine() (line string, err error) { - // t.lock must be held at this point - - if t.cursorX == 0 && t.cursorY == 0 { - t.writeLine(t.prompt) - t.c.Write(t.outBuf) - t.outBuf = t.outBuf[:0] - } - - lineIsPasted := t.pasteActive - - for { - rest := t.remainder - lineOk := false - for !lineOk { - var key rune - key, rest = bytesToKey(rest, t.pasteActive) - if key == utf8.RuneError { - break - } - if !t.pasteActive { - if key == keyCtrlD { - if len(t.line) == 0 { - return "", io.EOF - } - } - if key == keyCtrlC { - return "", io.EOF - } - if key == keyPasteStart { - t.pasteActive = true - if len(t.line) == 0 { - lineIsPasted = true - } - continue - } - } else if key == keyPasteEnd { - t.pasteActive = false - continue - } - if !t.pasteActive { - lineIsPasted = false - } - line, lineOk = t.handleKey(key) - } - if len(rest) > 0 { - n := copy(t.inBuf[:], rest) - t.remainder = t.inBuf[:n] - } else { - t.remainder = nil - } - t.c.Write(t.outBuf) - t.outBuf = t.outBuf[:0] - if lineOk { - if t.echo { - t.historyIndex = -1 - t.history.Add(line) - } - if lineIsPasted { - err = ErrPasteIndicator - } - return - } - - // t.remainder is a slice at the beginning of t.inBuf - // containing a partial key sequence - readBuf := t.inBuf[len(t.remainder):] - var n int - - t.lock.Unlock() - n, err = t.c.Read(readBuf) - t.lock.Lock() - - if err != nil { - return - } - - t.remainder = t.inBuf[:n+len(t.remainder)] - } -} - -// SetPrompt sets the prompt to be used when reading subsequent lines. -func (t *Terminal) SetPrompt(prompt string) { - t.lock.Lock() - defer t.lock.Unlock() - - t.prompt = []rune(prompt) -} - -func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { - // Move cursor to column zero at the start of the line. 
- t.move(t.cursorY, 0, t.cursorX, 0) - t.cursorX, t.cursorY = 0, 0 - t.clearLineToRight() - for t.cursorY < numPrevLines { - // Move down a line - t.move(0, 1, 0, 0) - t.cursorY++ - t.clearLineToRight() - } - // Move back to beginning. - t.move(t.cursorY, 0, 0, 0) - t.cursorX, t.cursorY = 0, 0 - - t.queue(t.prompt) - t.advanceCursor(visualLength(t.prompt)) - t.writeLine(t.line) - t.moveCursorToPos(t.pos) -} - -func (t *Terminal) SetSize(width, height int) error { - t.lock.Lock() - defer t.lock.Unlock() - - if width == 0 { - width = 1 - } - - oldWidth := t.termWidth - t.termWidth, t.termHeight = width, height - - switch { - case width == oldWidth: - // If the width didn't change then nothing else needs to be - // done. - return nil - case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0: - // If there is nothing on current line and no prompt printed, - // just do nothing - return nil - case width < oldWidth: - // Some terminals (e.g. xterm) will truncate lines that were - // too long when shinking. Others, (e.g. gnome-terminal) will - // attempt to wrap them. For the former, repainting t.maxLine - // works great, but that behaviour goes badly wrong in the case - // of the latter because they have doubled every full line. - - // We assume that we are working on a terminal that wraps lines - // and adjust the cursor position based on every previous line - // wrapping and turning into two. This causes the prompt on - // xterms to move upwards, which isn't great, but it avoids a - // huge mess with gnome-terminal. - if t.cursorX >= t.termWidth { - t.cursorX = t.termWidth - 1 - } - t.cursorY *= 2 - t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) - case width > oldWidth: - // If the terminal expands then our position calculations will - // be wrong in the future because we think the cursor is - // |t.pos| chars into the string, but there will be a gap at - // the end of any wrapped line. 
- // - // But the position will actually be correct until we move, so - // we can move back to the beginning and repaint everything. - t.clearAndRepaintLinePlusNPrevious(t.maxLine) - } - - _, err := t.c.Write(t.outBuf) - t.outBuf = t.outBuf[:0] - return err -} - -type pasteIndicatorError struct{} - -func (pasteIndicatorError) Error() string { - return "terminal: ErrPasteIndicator not correctly handled" -} - -// ErrPasteIndicator may be returned from ReadLine as the error, in addition -// to valid line data. It indicates that bracketed paste mode is enabled and -// that the returned line consists only of pasted data. Programs may wish to -// interpret pasted data more literally than typed data. -var ErrPasteIndicator = pasteIndicatorError{} - -// SetBracketedPasteMode requests that the terminal bracket paste operations -// with markers. Not all terminals support this but, if it is supported, then -// enabling this mode will stop any autocomplete callback from running due to -// pastes. Additionally, any lines that are completely pasted will be returned -// from ReadLine with the error set to ErrPasteIndicator. -func (t *Terminal) SetBracketedPasteMode(on bool) { - if on { - io.WriteString(t.c, "\x1b[?2004h") - } else { - io.WriteString(t.c, "\x1b[?2004l") - } -} - -// stRingBuffer is a ring buffer of strings. -type stRingBuffer struct { - // entries contains max elements. - entries []string - max int - // head contains the index of the element most recently added to the ring. - head int - // size contains the number of elements in the ring. - size int -} - -func (s *stRingBuffer) Add(a string) { - if s.entries == nil { - const defaultNumEntries = 100 - s.entries = make([]string, defaultNumEntries) - s.max = defaultNumEntries - } - - s.head = (s.head + 1) % s.max - s.entries[s.head] = a - if s.size < s.max { - s.size++ - } -} - -// NthPreviousEntry returns the value passed to the nth previous call to Add. 
-// If n is zero then the immediately prior value is returned, if one, then the -// next most recent, and so on. If such an element doesn't exist then ok is -// false. -func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { - if n < 0 || n >= s.size { - return "", false - } - index := s.head - n - if index < 0 { - index += s.max - } - return s.entries[index], true -} - -// readPasswordLine reads from reader until it finds \n or io.EOF. -// The slice returned does not include the \n. -// readPasswordLine also ignores any \r it finds. -// Windows uses \r as end of line. So, on Windows, readPasswordLine -// reads until it finds \r and ignores any \n it finds during processing. -func readPasswordLine(reader io.Reader) ([]byte, error) { - var buf [1]byte - var ret []byte - - for { - n, err := reader.Read(buf[:]) - if n > 0 { - switch buf[0] { - case '\b': - if len(ret) > 0 { - ret = ret[:len(ret)-1] - } - case '\n': - if runtime.GOOS != "windows" { - return ret, nil - } - // otherwise ignore \n - case '\r': - if runtime.GOOS == "windows" { - return ret, nil - } - // otherwise ignore \r - default: - ret = append(ret, buf[0]) - } - continue - } - if err != nil { - if err == io.EOF && len(ret) > 0 { - return ret, nil - } - return ret, err - } - } -} diff --git a/vendor/gopkg.in/errgo.v1/LICENSE b/vendor/gopkg.in/errgo.v1/LICENSE deleted file mode 100644 index e44c4cc2..00000000 --- a/vendor/gopkg.in/errgo.v1/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright © 2013, Roger Peppe -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of this project nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/errgo.v1/README.md b/vendor/gopkg.in/errgo.v1/README.md deleted file mode 100644 index c5bb5f62..00000000 --- a/vendor/gopkg.in/errgo.v1/README.md +++ /dev/null @@ -1,259 +0,0 @@ -# errgo --- - import "gopkg.in/errgo.v1" - -The errgo package provides a way to create and diagnose errors. It is compatible -with the usual Go error idioms but adds a way to wrap errors so that they record -source location information while retaining a consistent way for code to inspect -errors to find out particular problems. - -## Usage - -#### func Any - -```go -func Any(error) bool -``` -Any returns true. It can be used as an argument to Mask to allow any diagnosis -to pass through to the wrapped error. - -#### func Cause - -```go -func Cause(err error) error -``` -Cause returns the cause of the given error. 
If err does not implement Causer or -its Cause method returns nil, it returns err itself. - -Cause is the usual way to diagnose errors that may have been wrapped by Mask or -NoteMask. - -#### func Details - -```go -func Details(err error) string -``` -Details returns information about the stack of underlying errors wrapped by err, -in the format: - - [{filename:99: error one} {otherfile:55: cause of error one}] - -The details are found by type-asserting the error to the Locationer, Causer and -Wrapper interfaces. Details of the underlying stack are found by recursively -calling Underlying when the underlying error implements Wrapper. - -#### func Is - -```go -func Is(err error) func(error) bool -``` -Is returns a function that returns whether the an error is equal to the given -error. It is intended to be used as a "pass" argument to Mask and friends; for -example: - - return errgo.Mask(err, errgo.Is(http.ErrNoCookie)) - -would return an error with an http.ErrNoCookie cause only if that was err's -diagnosis; otherwise the diagnosis would be itself. - -#### func Mask - -```go -func Mask(underlying error, pass ...func(error) bool) error -``` -Mask returns an Err that wraps the given underyling error. The error message is -unchanged, but the error location records the caller of Mask. - -If err is nil, Mask returns nil. - -By default Mask conceals the cause of the wrapped error, but if pass(Cause(err)) -returns true for any of the provided pass functions, the cause of the returned -error will be Cause(err). - -For example, the following code will return an error whose cause is the error -from the os.Open call when (and only when) the file does not exist. - - f, err := os.Open("non-existent-file") - if err != nil { - return errgo.Mask(err, os.IsNotExist) - } - -In order to add context to returned errors, it is conventional to call Mask when -returning any error received from elsewhere. 
- -#### func MaskFunc - -```go -func MaskFunc(allow ...func(error) bool) func(error, ...func(error) bool) error -``` -MaskFunc returns an equivalent of Mask that always allows the specified causes -in addition to any causes specified when the returned function is called. - -It is defined for convenience, for example when all calls to Mask in a given -package wish to allow the same set of causes to be returned. - -#### func New - -```go -func New(s string) error -``` -New returns a new error with the given error message and no cause. It is a -drop-in replacement for errors.New from the standard library. - -#### func Newf - -```go -func Newf(f string, a ...interface{}) error -``` -Newf returns a new error with the given printf-formatted error message and no -cause. - -#### func NoteMask - -```go -func NoteMask(underlying error, msg string, pass ...func(error) bool) error -``` -NoteMask returns an Err that has the given underlying error, with the given -message added as context, and allowing the cause of the underlying error to pass -through into the result if allowed by the specific pass functions (see Mask for -an explanation of the pass parameter). - -#### func Notef - -```go -func Notef(underlying error, f string, a ...interface{}) error -``` -Notef returns an Error that wraps the given underlying error and adds the given -formatted context message. The returned error has no cause (use NoteMask or -WithCausef to add a message while retaining a cause). - -#### func WithCausef - -```go -func WithCausef(underlying, cause error, f string, a ...interface{}) error -``` -WithCausef returns a new Error that wraps the given (possibly nil) underlying -error and associates it with the given cause. The given formatted message -context will also be added. - -#### type Causer - -```go -type Causer interface { - Cause() error -} -``` - -Causer is the type of an error that may provide an error cause for error -diagnosis. 
Cause may return nil if there is no cause (for example because the -cause has been masked). - -#### type Err - -```go -type Err struct { - // Message_ holds the text of the error message. It may be empty - // if Underlying is set. - Message_ string - - // Cause_ holds the cause of the error as returned - // by the Cause method. - Cause_ error - - // Underlying_ holds the underlying error, if any. - Underlying_ error - - // File and Line identify the source code location where the error was - // created. - File string - Line int -} -``` - -Err holds a description of an error along with information about where the error -was created. - -It may be embedded in custom error types to add extra information that this -errors package can understand. - -#### func (*Err) Cause - -```go -func (e *Err) Cause() error -``` -Cause implements Causer. - -#### func (*Err) Error - -```go -func (e *Err) Error() string -``` -Error implements error.Error. - -#### func (*Err) GoString - -```go -func (e *Err) GoString() string -``` -GoString returns the details of the receiving error message, so that printing an -error with %#v will produce useful information. - -#### func (*Err) Location - -```go -func (e *Err) Location() (file string, line int) -``` -Location implements Locationer. - -#### func (*Err) Message - -```go -func (e *Err) Message() string -``` -Message returns the top level error message. - -#### func (*Err) SetLocation - -```go -func (e *Err) SetLocation(callDepth int) -``` -Locate records the source location of the error by setting e.Location, at -callDepth stack frames above the call. - -#### func (*Err) Underlying - -```go -func (e *Err) Underlying() error -``` -Underlying returns the underlying error if any. - -#### type Locationer - -```go -type Locationer interface { - // Location returns the name of the file and the line - // number associated with an error. 
- Location() (file string, line int) -} -``` - -Locationer can be implemented by any error type that wants to expose the source -location of an error. - -#### type Wrapper - -```go -type Wrapper interface { - // Message returns the top level error message, - // not including the message from the underlying - // error. - Message() string - - // Underlying returns the underlying error, or nil - // if there is none. - Underlying() error -} -``` - -Wrapper is the type of an error that wraps another error. It is exposed so that -external types may implement it, but should in general not be used otherwise. diff --git a/vendor/gopkg.in/errgo.v1/errors.go b/vendor/gopkg.in/errgo.v1/errors.go deleted file mode 100644 index 4d6a5e3c..00000000 --- a/vendor/gopkg.in/errgo.v1/errors.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2014 Roger Peppe. -// See LICENCE file for details. - -// Package errgo provides a way to create -// and diagnose errors. It is compatible with -// the usual Go error idioms but adds a way to wrap errors -// so that they record source location information -// while retaining a consistent way for code to -// inspect errors to find out particular problems. -// -package errgo - -import ( - "bytes" - "fmt" - "log" - "runtime" -) - -const debug = false - -// Err holds a description of an error along with information about -// where the error was created. -// -// It may be embedded in custom error types to add -// extra information that this errors package can -// understand. -type Err struct { - // Message_ holds the text of the error message. It may be empty - // if Underlying is set. - Message_ string - - // Cause_ holds the cause of the error as returned - // by the Cause method. - Cause_ error - - // Underlying_ holds the underlying error, if any. - Underlying_ error - - // File and Line identify the source code location where the error was - // created. - File string - Line int -} - -// Location implements Locationer. 
-func (e *Err) Location() (file string, line int) { - return e.File, e.Line -} - -// Underlying returns the underlying error if any. -func (e *Err) Underlying() error { - return e.Underlying_ -} - -// Cause implements Causer. -func (e *Err) Cause() error { - return e.Cause_ -} - -// Message returns the top level error message. -func (e *Err) Message() string { - return e.Message_ -} - -// Error implements error.Error. -func (e *Err) Error() string { - switch { - case e.Message_ == "" && e.Underlying_ == nil: - return "<no error>" - case e.Message_ == "": - return e.Underlying_.Error() - case e.Underlying_ == nil: - return e.Message_ - } - return fmt.Sprintf("%s: %v", e.Message_, e.Underlying_) -} - -// GoString returns the details of the receiving error -// message, so that printing an error with %#v will -// produce useful information. -func (e *Err) GoString() string { - return Details(e) -} - -// Causer is the type of an error that may provide -// an error cause for error diagnosis. Cause may return -// nil if there is no cause (for example because the -// cause has been masked). -type Causer interface { - Cause() error -} - -// Wrapper is the type of an error that wraps another error. It is -// exposed so that external types may implement it, but should in -// general not be used otherwise. -type Wrapper interface { - // Message returns the top level error message, - // not including the message from the underlying - // error. - Message() string - - // Underlying returns the underlying error, or nil - // if there is none. - Underlying() error -} - -// Locationer can be implemented by any error type -// that wants to expose the source location of an error. -type Locationer interface { - // Location returns the name of the file and the line - // number associated with an error. 
- Location() (file string, line int) -} - -// Details returns information about the stack of -// underlying errors wrapped by err, in the format: -// -// [{filename:99: error one} {otherfile:55: cause of error one}] -// -// The details are found by type-asserting the error to -// the Locationer, Causer and Wrapper interfaces. -// Details of the underlying stack are found by -// recursively calling Underlying when the -// underlying error implements Wrapper. -func Details(err error) string { - if err == nil { - return "[]" - } - var s []byte - s = append(s, '[') - for { - s = append(s, '{') - if err, ok := err.(Locationer); ok { - file, line := err.Location() - if file != "" { - s = append(s, fmt.Sprintf("%s:%d", file, line)...) - s = append(s, ": "...) - } - } - if cerr, ok := err.(Wrapper); ok { - s = append(s, cerr.Message()...) - err = cerr.Underlying() - } else { - s = append(s, err.Error()...) - err = nil - } - if debug { - if err, ok := err.(Causer); ok { - if cause := err.Cause(); cause != nil { - s = append(s, fmt.Sprintf("=%T", cause)...) - s = append(s, Details(cause)...) - } - } - } - s = append(s, '}') - if err == nil { - break - } - s = append(s, ' ') - } - s = append(s, ']') - return string(s) -} - -// SetLocation records the source location of the error by setting -// e.Location, at callDepth stack frames above the call. -func (e *Err) SetLocation(callDepth int) { - _, file, line, _ := runtime.Caller(callDepth + 1) - e.File, e.Line = file, line -} - -func setLocation(err error, callDepth int) { - if e, _ := err.(*Err); e != nil { - e.SetLocation(callDepth + 1) - } -} - -// New returns a new error with the given error message and no cause. It -// is a drop-in replacement for errors.New from the standard library. -func New(s string) error { - err := &Err{Message_: s} - err.SetLocation(1) - return err -} - -// Newf returns a new error with the given printf-formatted error -// message and no cause. 
-func Newf(f string, a ...interface{}) error { - err := &Err{Message_: fmt.Sprintf(f, a...)} - err.SetLocation(1) - return err -} - -// match returns whether any of the given -// functions returns true when called with err as an -// argument. -func match(err error, pass ...func(error) bool) bool { - for _, f := range pass { - if f(err) { - return true - } - } - return false -} - -// Is returns a function that returns whether the -// an error is equal to the given error. -// It is intended to be used as a "pass" argument -// to Mask and friends; for example: -// -// return errgo.Mask(err, errgo.Is(http.ErrNoCookie)) -// -// would return an error with an http.ErrNoCookie cause -// only if that was err's diagnosis; otherwise the diagnosis -// would be itself. -func Is(err error) func(error) bool { - return func(err1 error) bool { - return err == err1 - } -} - -// Any returns true. It can be used as an argument to Mask -// to allow any diagnosis to pass through to the wrapped -// error. -func Any(error) bool { - return true -} - -// NoteMask returns an Err that has the given underlying error, -// with the given message added as context, and allowing -// the cause of the underlying error to pass through into -// the result if allowed by the specific pass functions -// (see Mask for an explanation of the pass parameter). -func NoteMask(underlying error, msg string, pass ...func(error) bool) error { - err := noteMask(underlying, msg, pass...) - setLocation(err, 1) - return err -} - -// noteMask is exactly like NoteMask except it doesn't set the location -// of the returned error, so that we can avoid setting it twice -// when it's used in other functions. -func noteMask(underlying error, msg string, pass ...func(error) bool) error { - newErr := &Err{ - Underlying_: underlying, - Message_: msg, - } - if len(pass) > 0 { - if cause := Cause(underlying); match(cause, pass...) 
{ - newErr.Cause_ = cause - } - } - if debug { - if newd, oldd := newErr.Cause_, Cause(underlying); newd != oldd { - log.Printf("Mask cause %[1]T(%[1]v)->%[2]T(%[2]v)", oldd, newd) - log.Printf("call stack: %s", callers(0, 20)) - log.Printf("len(allow) == %d", len(pass)) - log.Printf("old error %#v", underlying) - log.Printf("new error %#v", newErr) - } - } - newErr.SetLocation(1) - return newErr -} - -// Mask returns an Err that wraps the given underyling error. The error -// message is unchanged, but the error location records the caller of -// Mask. -// -// If err is nil, Mask returns nil. -// -// By default Mask conceals the cause of the wrapped error, but if -// pass(Cause(err)) returns true for any of the provided pass functions, -// the cause of the returned error will be Cause(err). -// -// For example, the following code will return an error whose cause is -// the error from the os.Open call when (and only when) the file does -// not exist. -// -// f, err := os.Open("non-existent-file") -// if err != nil { -// return errgo.Mask(err, os.IsNotExist) -// } -// -// In order to add context to returned errors, it -// is conventional to call Mask when returning any -// error received from elsewhere. -// -func Mask(underlying error, pass ...func(error) bool) error { - if underlying == nil { - return nil - } - err := noteMask(underlying, "", pass...) - setLocation(err, 1) - return err -} - -// Notef returns an Error that wraps the given underlying -// error and adds the given formatted context message. -// The returned error has no cause (use NoteMask -// or WithCausef to add a message while retaining a cause). -func Notef(underlying error, f string, a ...interface{}) error { - err := noteMask(underlying, fmt.Sprintf(f, a...)) - setLocation(err, 1) - return err -} - -// MaskFunc returns an equivalent of Mask that always allows the -// specified causes in addition to any causes specified when the -// returned function is called. 
-// -// It is defined for convenience, for example when all calls to Mask in -// a given package wish to allow the same set of causes to be returned. -func MaskFunc(allow ...func(error) bool) func(error, ...func(error) bool) error { - return func(err error, allow1 ...func(error) bool) error { - var allowEither []func(error) bool - if len(allow1) > 0 { - // This is more efficient than using a function literal, - // because the compiler knows that it doesn't escape. - allowEither = make([]func(error) bool, len(allow)+len(allow1)) - copy(allowEither, allow) - copy(allowEither[len(allow):], allow1) - } else { - allowEither = allow - } - err = Mask(err, allowEither...) - setLocation(err, 1) - return err - } -} - -// WithCausef returns a new Error that wraps the given -// (possibly nil) underlying error and associates it with -// the given cause. The given formatted message context -// will also be added. If underlying is nil and f is empty and has no arguments, -// the message will be the same as the cause. -func WithCausef(underlying, cause error, f string, a ...interface{}) error { - var msg string - if underlying == nil && f == "" && len(a) == 0 && cause != nil { - msg = cause.Error() - } else { - msg = fmt.Sprintf(f, a...) - } - err := &Err{ - Underlying_: underlying, - Cause_: cause, - Message_: msg, - } - err.SetLocation(1) - return err -} - -// Cause returns the cause of the given error. If err does not -// implement Causer or its Cause method returns nil, it returns err itself. -// -// Cause is the usual way to diagnose errors that may have -// been wrapped by Mask or NoteMask. -func Cause(err error) error { - var diag error - if err, ok := err.(Causer); ok { - diag = err.Cause() - } - if diag != nil { - return diag - } - return err -} - -// callers returns the stack trace of the goroutine that called it, -// starting n entries above the caller of callers, as a space-separated list -// of filename:line-number pairs with no new lines. 
-func callers(n, max int) []byte { - var b bytes.Buffer - prev := false - for i := 0; i < max; i++ { - _, file, line, ok := runtime.Caller(n + 1) - if !ok { - return b.Bytes() - } - if prev { - fmt.Fprintf(&b, " ") - } - fmt.Fprintf(&b, "%s:%d", file, line) - n++ - prev = true - } - return b.Bytes() -} diff --git a/vendor/gopkg.in/httprequest.v1/.travis.yml b/vendor/gopkg.in/httprequest.v1/.travis.yml deleted file mode 100644 index 7b1de304..00000000 --- a/vendor/gopkg.in/httprequest.v1/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go_import_path: "gopkg.in/httprequest.v1" -go: - - "1.11.x" -script: GO111MODULE=on go test ./... diff --git a/vendor/gopkg.in/httprequest.v1/LICENSE b/vendor/gopkg.in/httprequest.v1/LICENSE deleted file mode 100644 index 53320c35..00000000 --- a/vendor/gopkg.in/httprequest.v1/LICENSE +++ /dev/null @@ -1,185 +0,0 @@ -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. 
- - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. 
- - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. 
- - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. 
- - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. 
diff --git a/vendor/gopkg.in/httprequest.v1/README.md b/vendor/gopkg.in/httprequest.v1/README.md deleted file mode 100644 index 74a80af1..00000000 --- a/vendor/gopkg.in/httprequest.v1/README.md +++ /dev/null @@ -1,690 +0,0 @@ -# httprequest --- - import "gopkg.in/httprequest.v1" - -Package httprequest provides functionality for marshaling unmarshaling HTTP -request parameters into a struct type. It also provides a way to define methods -as HTTP routes using the same approach. - -It requires at least Go 1.7, and Go 1.9 is required if the importing program -also uses golang.org/x/net/context. - -## Usage - -```go -const ( - CodeBadRequest = "bad request" - CodeUnauthorized = "unauthorized" - CodeForbidden = "forbidden" - CodeNotFound = "not found" -) -``` -These constants are recognized by DefaultErrorMapper as mapping to the similarly -named HTTP status codes. - -```go -var ( - ErrUnmarshal = errgo.New("httprequest unmarshal error") - ErrBadUnmarshalType = errgo.New("httprequest bad unmarshal type") -) -``` - -```go -var DefaultErrorMapper = defaultErrorMapper -``` -DefaultErrorMapper is used by Server when ErrorMapper is nil. It maps all errors -to RemoteError instances; if an error implements the ErrorCoder interface, the -Code field will be set accordingly; some codes will map to specific HTTP status -codes (for example, if ErrorCode returns CodeBadRequest, the resulting HTTP -status will be http.StatusBadRequest). - -```go -var DefaultErrorUnmarshaler = ErrorUnmarshaler(new(RemoteError)) -``` -DefaultErrorUnmarshaler is the default error unmarshaler used by Client. - -#### func AddHandlers - -```go -func AddHandlers(r *httprouter.Router, hs []Handler) -``` -AddHandlers adds all the handlers in the given slice to r. - -#### func ErrorUnmarshaler - -```go -func ErrorUnmarshaler(template error) func(*http.Response) error -``` -ErrorUnmarshaler returns a function which will unmarshal error responses into -new values of the same type as template. 
The argument must be a pointer. A new -instance of it is created every time the returned function is called. - -If the error cannot by unmarshaled, the function will return an -*HTTPResponseError holding the response from the request. - -#### func Marshal - -```go -func Marshal(baseURL, method string, x interface{}) (*http.Request, error) -``` -Marshal is the counterpart of Unmarshal. It takes information from x, which must -be a pointer to a struct, and returns an HTTP request using the given method -that holds all of the information. - -The Body field in the returned request will always be of type BytesReaderCloser. - -If x implements the HeaderSetter interface, its SetHeader method will be called -to add additional headers to the HTTP request after it has been marshaled. If x -is pointer to a CustomHeader object then Marshal will use its Body member to -create the HTTP request. - -The HTTP request will use the given method. Named fields in the given baseURL -will be filled out from "path"-tagged fields in x to form the URL path in the -returned request. These are specified as for httprouter. - -If a field in baseURL is a suffix of the form "*var" (a trailing wildcard -element that holds the rest of the path), the marshaled string must begin with a -"/". This matches the httprouter convention that it always returns such fields -with a "/" prefix. - -If a field is of type string or []string, the value of the field will be used -directly; otherwise if implements encoding.TextMarshaler, that will be used to -marshal the field, otherwise fmt.Sprint will be used. - -An "omitempty" attribute on a form or header field specifies that if the form or -header value is zero, the form or header entry will be omitted. 
If the field is -a nil pointer, it will be omitted; otherwise if the field type implements -IsZeroer, that method will be used to determine whether the value is zero, -otherwise if the value is comparable, it will be compared with the zero value -for its type, otherwise the value will never be omitted. One notable -implementation of IsZeroer is time.Time. - -An "inbody" attribute on a form field specifies that the field will be marshaled -as part of an application/x-www-form-urlencoded body. Note that the field may -still be unmarshaled from either a URL query parameter or a form-encoded body. - -For example, this code: - - type UserDetails struct { - Age int - } - - type Test struct { - Username string `httprequest:"user,path"` - ContextId int64 `httprequest:"context,form"` - Extra string `httprequest:"context,form,omitempty"` - Details UserDetails `httprequest:",body"` - } - req, err := Marshal("http://example.com/users/:user/details", "GET", &Test{ - Username: "bob", - ContextId: 1234, - Details: UserDetails{ - Age: 36, - } - }) - if err != nil { - ... - } - -will produce an HTTP request req with a URL of -http://example.com/users/bob/details?context=1234 and a JSON-encoded body -holding `{"Age":36}`. - -It is an error if there is a field specified in the URL that is not found in x. - -#### func ToHTTP - -```go -func ToHTTP(h httprouter.Handle) http.Handler -``` -ToHTTP converts an httprouter.Handle into an http.Handler. It will pass no path -variables to h. - -#### func Unmarshal - -```go -func Unmarshal(p Params, x interface{}) error -``` -Unmarshal takes values from given parameters and fills out fields in x, which -must be a pointer to a struct. - -Tags on the struct's fields determine where each field is filled in from. -Similar to encoding/json and other encoding packages, the tag holds a -comma-separated list. The first item in the list is an alternative name for the -field (the field name itself will be used if this is empty). 
The next item -specifies where the field is filled in from. It may be: - - "path" - the field is taken from a parameter in p.PathVar - with a matching field name. - - "form" - the field is taken from the given name in p.Request.Form - (note that this covers both URL query parameters and - POST form parameters). - - "header" - the field is taken from the given name in - p.Request.Header. - - "body" - the field is filled in by parsing the request body - as JSON. - -For path and form parameters, the field will be filled out from the field in -p.PathVar or p.Form using one of the following methods (in descending order of -preference): - -- if the type is string, it will be set from the first value. - -- if the type is []string, it will be filled out using all values for that field - - (allowed only for form) - -- if the type implements encoding.TextUnmarshaler, its UnmarshalText method will -be used - -- otherwise fmt.Sscan will be used to set the value. - -When the unmarshaling fails, Unmarshal returns an error with an ErrUnmarshal -cause. If the type of x is inappropriate, it returns an error with an -ErrBadUnmarshalType cause. - -#### func UnmarshalJSONResponse - -```go -func UnmarshalJSONResponse(resp *http.Response, x interface{}) error -``` -UnmarshalJSONResponse unmarshals the given HTTP response into x, which should be -a pointer to the result to be unmarshaled into. - -If the response cannot be unmarshaled, an error of type *DecodeResponseError -will be returned. - -#### func WriteJSON - -```go -func WriteJSON(w http.ResponseWriter, code int, val interface{}) error -``` -WriteJSON writes the given value to the ResponseWriter and sets the HTTP status -to the given code. - -If val implements the HeaderSetter interface, the SetHeader method will be -called to add additional headers to the HTTP response. It is called after the -Content-Type header has been added, so can be used to override the content type -if required. 
- -#### type BytesReaderCloser - -```go -type BytesReaderCloser struct { - *bytes.Reader -} -``` - -BytesReaderCloser is a bytes.Reader which implements io.Closer with a no-op -Close method. - -#### func (BytesReaderCloser) Close - -```go -func (BytesReaderCloser) Close() error -``` -Close implements io.Closer.Close. - -#### type Client - -```go -type Client struct { - // BaseURL holds the base URL to use when making - // HTTP requests. - BaseURL string - - // Doer holds a value that will be used to actually - // make the HTTP request. If it is nil, http.DefaultClient - // will be used instead. If Doer implements DoerWithContext, - // DoWithContext will be used instead. - Doer Doer - - // If a request returns an HTTP response that signifies an - // error, UnmarshalError is used to unmarshal the response into - // an appropriate error. See ErrorUnmarshaler for a convenient - // way to create an UnmarshalError function for a given type. If - // this is nil, DefaultErrorUnmarshaler will be used. - UnmarshalError func(resp *http.Response) error -} -``` - -Client represents a client that can invoke httprequest endpoints. - -#### func (*Client) Call - -```go -func (c *Client) Call(ctx context.Context, params, resp interface{}) error -``` -Call invokes the endpoint implied by the given params, which should be of the -form accepted by the ArgT argument to a function passed to Handle, and -unmarshals the response into the given response parameter, which should be a -pointer to the response value. - -If params implements the HeaderSetter interface, its SetHeader method will be -called to add additional headers to the HTTP request. - -If resp is nil, the response will be ignored if the request was successful. - -If resp is of type **http.Response, instead of unmarshaling into it, its element -will be set to the returned HTTP response directly and the caller is responsible -for closing its Body field. 
- -Any error that c.UnmarshalError or c.Doer returns will not have its cause -masked. - -If the request returns a response with a status code signifying success, but the -response could not be unmarshaled, a *DecodeResponseError will be returned -holding the response. Note that if the request returns an error status code, the -Client.UnmarshalError function is responsible for doing this if desired (the -default error unmarshal functions do). - -#### func (*Client) CallURL - -```go -func (c *Client) CallURL(ctx context.Context, url string, params, resp interface{}) error -``` -CallURL is like Call except that the given URL is used instead of c.BaseURL. - -#### func (*Client) Do - -```go -func (c *Client) Do(ctx context.Context, req *http.Request, resp interface{}) error -``` -Do sends the given request and unmarshals its JSON result into resp, which -should be a pointer to the response value. If an error status is returned, the -error will be unmarshaled as in Client.Call. - -If resp is nil, the response will be ignored if the response was successful. - -If resp is of type **http.Response, instead of unmarshaling into it, its element -will be set to the returned HTTP response directly and the caller is responsible -for closing its Body field. - -Any error that c.UnmarshalError or c.Doer returns will not have its cause -masked. - -If req.URL does not have a host part it will be treated as relative to -c.BaseURL. req.URL will be updated to the actual URL used. - -If the response cannot by unmarshaled, a *DecodeResponseError will be returned -holding the response from the request. the entire response body. - -#### func (*Client) Get - -```go -func (c *Client) Get(ctx context.Context, url string, resp interface{}) error -``` -Get is a convenience method that uses c.Do to issue a GET request to the given -URL. If the given URL does not have a host part then it will be treated as -relative to c.BaseURL. 
- -#### type CustomHeader - -```go -type CustomHeader struct { - // Body holds the JSON-marshaled body of the response. - Body interface{} - - // SetHeaderFunc holds a function that will be called - // to set any custom headers on the response. - SetHeaderFunc func(http.Header) -} -``` - -CustomHeader is a type that allows a JSON value to set custom HTTP headers -associated with the HTTP response. - -#### func (CustomHeader) MarshalJSON - -```go -func (h CustomHeader) MarshalJSON() ([]byte, error) -``` -MarshalJSON implements json.Marshaler by marshaling h.Body. - -#### func (CustomHeader) SetHeader - -```go -func (h CustomHeader) SetHeader(header http.Header) -``` -SetHeader implements HeaderSetter by calling h.SetHeaderFunc. - -#### type DecodeRequestError - -```go -type DecodeRequestError struct { - // Request holds the problematic HTTP request. - // The body of this does not need to be closed - // and may be truncated if the response is large. - Request *http.Request - - // DecodeError holds the error that was encountered - // when decoding. - DecodeError error -} -``` - -DecodeRequestError represents an error when an HTTP request could not be -decoded. - -#### func (*DecodeRequestError) Error - -```go -func (e *DecodeRequestError) Error() string -``` - -#### type DecodeResponseError - -```go -type DecodeResponseError struct { - // Response holds the problematic HTTP response. - // The body of this does not need to be closed - // and may be truncated if the response is large. - Response *http.Response - - // DecodeError holds the error that was encountered - // when decoding. - DecodeError error -} -``` - -DecodeResponseError represents an error when an HTTP response could not be -decoded. 
- -#### func (*DecodeResponseError) Error - -```go -func (e *DecodeResponseError) Error() string -``` - -#### type Doer - -```go -type Doer interface { - Do(req *http.Request) (*http.Response, error) -} -``` - -Doer is implemented by HTTP client packages to make an HTTP request. It is -notably implemented by http.Client and httpbakery.Client. - -#### type DoerWithContext - -```go -type DoerWithContext interface { - DoWithContext(ctx context.Context, req *http.Request) (*http.Response, error) -} -``` - -DoerWithContext is implemented by HTTP clients that can use a context with the -HTTP request. - -#### type ErrorCoder - -```go -type ErrorCoder interface { - ErrorCode() string -} -``` - -ErrorCoder may be implemented by an error to cause it to return a particular -RemoteError code when DefaultErrorMapper is used. - -#### type ErrorHandler - -```go -type ErrorHandler func(Params) error -``` - -ErrorHandler is like httprouter.Handle except it returns an error which may be -returned as the error body of the response. An ErrorHandler function should not -itself write to the ResponseWriter if it returns an error. - -#### type Handler - -```go -type Handler struct { - Method string - Path string - Handle httprouter.Handle -} -``` - -Handler defines a HTTP handler that will handle the given HTTP method at the -given httprouter path - -#### type HeaderSetter - -```go -type HeaderSetter interface { - SetHeader(http.Header) -} -``` - -HeaderSetter is the interface checked for by WriteJSON. If implemented on a -value passed to WriteJSON, the SetHeader method will be called to allow it to -set custom headers on the response. - -#### type IsZeroer - -```go -type IsZeroer interface { - IsZero() bool -} -``` - -IsZeroer is used when marshaling to determine if a value is zero (see Marshal). 
- -#### type JSONHandler - -```go -type JSONHandler func(Params) (interface{}, error) -``` - -JSONHandler is like httprouter.Handle except that it returns a body (to be -converted to JSON) and an error. The Header parameter can be used to set custom -headers on the response. - -#### type Params - -```go -type Params struct { - Response http.ResponseWriter - Request *http.Request - PathVar httprouter.Params - // PathPattern holds the path pattern matched by httprouter. - // It is only set where httprequest has the information; - // that is where the call was made by Server.Handler - // or Server.Handlers. - PathPattern string - // Context holds a context for the request. In Go 1.7 and later, - // this should be used in preference to Request.Context. - Context context.Context -} -``` - -Params holds the parameters provided to an HTTP request. - -#### type RemoteError - -```go -type RemoteError struct { - // Message holds the error message. - Message string - - // Code may hold a code that classifies the error. - Code string `json:",omitempty"` - - // Info holds any other information associated with the error. - Info *json.RawMessage `json:",omitempty"` -} -``` - -RemoteError holds the default type of a remote error used by Client when no -custom error unmarshaler is set. This type is also used by DefaultErrorMapper to -marshal errors in Server. - -#### func Errorf - -```go -func Errorf(code string, f string, a ...interface{}) *RemoteError -``` -Errorf returns a new RemoteError instance that uses the given code and formats -the message with fmt.Sprintf(f, a...). If f is empty and there are no other -arguments, code will also be used for the message. - -#### func (*RemoteError) Error - -```go -func (e *RemoteError) Error() string -``` -Error implements the error interface. - -#### func (*RemoteError) ErrorCode - -```go -func (e *RemoteError) ErrorCode() string -``` -ErrorCode implements ErrorCoder by returning e.Code. 
- -#### type Route - -```go -type Route struct{} -``` - -Route is the type of a field that specifies a routing path and HTTP method. See -Marshal and Unmarshal for details. - -#### type Server - -```go -type Server struct { - // ErrorMapper holds a function that can convert a Go error - // into a form that can be returned as a JSON body from an HTTP request. - // - // The httpStatus value reports the desired HTTP status. - // - // If the returned errorBody implements HeaderSetter, then - // that method will be called to add custom headers to the request. - // - // If this both this and ErrorWriter are nil, DefaultErrorMapper will be used. - ErrorMapper func(ctxt context.Context, err error) (httpStatus int, errorBody interface{}) - - // ErrorWriter is a more general form of ErrorMapper. If this - // field is set, ErrorMapper will be ignored and any returned - // errors will be passed to ErrorWriter, which should use - // w to set the HTTP status and write an appropriate - // error response. - ErrorWriter func(ctx context.Context, w http.ResponseWriter, err error) -} -``` - -Server represents the server side of an HTTP servers, and can be used to create -HTTP handlers although it is not an HTTP handler itself. - -#### func (*Server) Handle - -```go -func (srv *Server) Handle(f interface{}) Handler -``` -Handle converts a function into a Handler. The argument f must be a function of -one of the following six forms, where ArgT must be a struct type acceptable to -Unmarshal and ResultT is a type that can be marshaled as JSON: - - func(p Params, arg *ArgT) - func(p Params, arg *ArgT) error - func(p Params, arg *ArgT) (ResultT, error) - - func(arg *ArgT) - func(arg *ArgT) error - func(arg *ArgT) (ResultT, error) - -When processing a call to the returned handler, the provided parameters are -unmarshaled into a new ArgT value using Unmarshal, then f is called with this -value. 
If the unmarshaling fails, f will not be called and the unmarshal error -will be written as a JSON response. - -As an additional special case to the rules defined in Unmarshal, the tag on an -anonymous field of type Route specifies the method and path to use in the HTTP -request. It should hold two space-separated fields; the first specifies the HTTP -method, the second the URL path to use for the request. If this is given, the -returned handler will hold that method and path, otherwise they will be empty. - -If an error is returned from f, it is passed through the error mapper before -writing as a JSON response. - -In the third form, when no error is returned, the result is written as a JSON -response with status http.StatusOK. Also in this case, any calls to -Params.Response.Write or Params.Response.WriteHeader will be ignored, as the -response code and data should be defined entirely by the returned result and -error. - -Handle will panic if the provided function is not in one of the above forms. - -#### func (*Server) HandleErrors - -```go -func (srv *Server) HandleErrors(handle ErrorHandler) httprouter.Handle -``` -HandleErrors returns a handler that passes any non-nil error returned by handle -through the error mapper and writes it as a JSON response. - -Note that the Params argument passed to handle will not have its PathPattern set -as that information is not available. - -#### func (*Server) HandleJSON - -```go -func (srv *Server) HandleJSON(handle JSONHandler) httprouter.Handle -``` -HandleJSON returns a handler that writes the return value of handle as a JSON -response. If handle returns an error, it is passed through the error mapper. - -Note that the Params argument passed to handle will not have its PathPattern set -as that information is not available. 
- -#### func (*Server) Handlers - -```go -func (srv *Server) Handlers(f interface{}) []Handler -``` -Handlers returns a list of handlers that will be handled by the value returned -by the given argument, which must be a function in one of the following forms: - - func(p httprequest.Params) (T, context.Context, error) - func(p httprequest.Params, handlerArg I) (T, context.Context, error) - -for some type T and some interface type I. Each exported method defined on T -defines a handler, and should be in one of the forms accepted by Server.Handle -with the additional constraint that the argument to each of the handlers must be -compatible with the type I when the second form is used above. - -The returned context will be used as the value of Params.Context when Params is -passed to any method. It will also be used when writing an error if the function -returns an error. - -Handlers will panic if f is not of the required form, no methods are defined on -T or any method defined on T is not suitable for Handle. - -When any of the returned handlers is invoked, f will be called and then the -appropriate method will be called on the value it returns. If specified, the -handlerArg parameter to f will hold the ArgT argument that will be passed to the -handler method. - -If T implements io.Closer, its Close method will be called after the request is -completed. - -#### func (*Server) WriteError - -```go -func (srv *Server) WriteError(ctx context.Context, w http.ResponseWriter, err error) -``` -WriteError writes an error to a ResponseWriter and sets the HTTP status code, -using srv.ErrorMapper to determine the actually written response. - -It uses WriteJSON to write the error body returned from the ErrorMapper so it is -possible to add custom headers to the HTTP error response by implementing -HeaderSetter. 
diff --git a/vendor/gopkg.in/httprequest.v1/client.go b/vendor/gopkg.in/httprequest.v1/client.go deleted file mode 100644 index 7994df58..00000000 --- a/vendor/gopkg.in/httprequest.v1/client.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package httprequest - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "strings" - - "gopkg.in/errgo.v1" -) - -// Doer is implemented by HTTP client packages -// to make an HTTP request. It is notably implemented -// by http.Client and httpbakery.Client. -type Doer interface { - Do(req *http.Request) (*http.Response, error) -} - -// DoerWithContext is implemented by HTTP clients that can use a context -// with the HTTP request. -type DoerWithContext interface { - DoWithContext(ctx context.Context, req *http.Request) (*http.Response, error) -} - -// Client represents a client that can invoke httprequest endpoints. -type Client struct { - // BaseURL holds the base URL to use when making - // HTTP requests. - BaseURL string - - // Doer holds a value that will be used to actually - // make the HTTP request. If it is nil, http.DefaultClient - // will be used instead. If Doer implements DoerWithContext, - // DoWithContext will be used instead. - Doer Doer - - // If a request returns an HTTP response that signifies an - // error, UnmarshalError is used to unmarshal the response into - // an appropriate error. See ErrorUnmarshaler for a convenient - // way to create an UnmarshalError function for a given type. If - // this is nil, DefaultErrorUnmarshaler will be used. 
- UnmarshalError func(resp *http.Response) error -} - -// Call invokes the endpoint implied by the given params, -// which should be of the form accepted by the ArgT -// argument to a function passed to Handle, and -// unmarshals the response into the given response parameter, -// which should be a pointer to the response value. -// -// If params implements the HeaderSetter interface, its SetHeader method -// will be called to add additional headers to the HTTP request. -// -// If resp is nil, the response will be ignored if the -// request was successful. -// -// If resp is of type **http.Response, instead of unmarshaling -// into it, its element will be set to the returned HTTP -// response directly and the caller is responsible for -// closing its Body field. -// -// Any error that c.UnmarshalError or c.Doer returns will not -// have its cause masked. -// -// If the request returns a response with a status code signifying -// success, but the response could not be unmarshaled, a -// *DecodeResponseError will be returned holding the response. Note that if -// the request returns an error status code, the Client.UnmarshalError -// function is responsible for doing this if desired (the default error -// unmarshal functions do). -func (c *Client) Call(ctx context.Context, params, resp interface{}) error { - return c.CallURL(ctx, c.BaseURL, params, resp) -} - -// CallURL is like Call except that the given URL is used instead of -// c.BaseURL. 
-func (c *Client) CallURL(ctx context.Context, url string, params, resp interface{}) error { - rt, err := getRequestType(reflect.TypeOf(params)) - if err != nil { - return errgo.Mask(err) - } - if rt.method == "" { - return errgo.Newf("type %T has no httprequest.Route field", params) - } - reqURL, err := appendURL(url, rt.path) - if err != nil { - return errgo.Mask(err) - } - req, err := Marshal(reqURL.String(), rt.method, params) - if err != nil { - return errgo.Mask(err) - } - return c.Do(ctx, req, resp) -} - -// Do sends the given request and unmarshals its JSON -// result into resp, which should be a pointer to the response value. -// If an error status is returned, the error will be unmarshaled -// as in Client.Call. -// -// If resp is nil, the response will be ignored if the response was -// successful. -// -// If resp is of type **http.Response, instead of unmarshaling -// into it, its element will be set to the returned HTTP -// response directly and the caller is responsible for -// closing its Body field. -// -// Any error that c.UnmarshalError or c.Doer returns will not -// have its cause masked. -// -// If req.URL does not have a host part it will be treated as relative to -// c.BaseURL. req.URL will be updated to the actual URL used. -// -// If the response cannot by unmarshaled, a *DecodeResponseError -// will be returned holding the response from the request. -// the entire response body. 
-func (c *Client) Do(ctx context.Context, req *http.Request, resp interface{}) error { - if req.URL.Host == "" { - var err error - req.URL, err = appendURL(c.BaseURL, req.URL.String()) - if err != nil { - return errgo.Mask(err) - } - } - doer := c.Doer - if doer == nil { - doer = http.DefaultClient - } - var httpResp *http.Response - var err error - if ctxDoer, ok := doer.(DoerWithContext); ok { - httpResp, err = ctxDoer.DoWithContext(ctx, req) - } else { - httpResp, err = doer.Do(req.WithContext(ctx)) - } - if err != nil { - return errgo.Mask(urlError(err, req), errgo.Any) - } - return c.unmarshalResponse(httpResp, resp) -} - -// Get is a convenience method that uses c.Do to issue a GET request to -// the given URL. If the given URL does not have a host part then it will -// be treated as relative to c.BaseURL. -func (c *Client) Get(ctx context.Context, url string, resp interface{}) error { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return errgo.Notef(err, "cannot make request") - } - return c.Do(ctx, req, resp) -} - -// unmarshalResponse unmarshals an HTTP response into the given value. 
-func (c *Client) unmarshalResponse(httpResp *http.Response, resp interface{}) error { - if 200 <= httpResp.StatusCode && httpResp.StatusCode < 300 { - if respPt, ok := resp.(**http.Response); ok { - *respPt = httpResp - return nil - } - defer httpResp.Body.Close() - if err := UnmarshalJSONResponse(httpResp, resp); err != nil { - return errgo.Mask(urlError(err, httpResp.Request), isDecodeResponseError) - } - return nil - } - defer httpResp.Body.Close() - errUnmarshaler := c.UnmarshalError - if errUnmarshaler == nil { - errUnmarshaler = DefaultErrorUnmarshaler - } - err := errUnmarshaler(httpResp) - if err == nil { - err = errgo.Newf("unexpected HTTP response status: %s", httpResp.Status) - } - return errgo.Mask(urlError(err, httpResp.Request), errgo.Any) -} - -// ErrorUnmarshaler returns a function which will unmarshal error -// responses into new values of the same type as template. The argument -// must be a pointer. A new instance of it is created every time the -// returned function is called. -// -// If the error cannot by unmarshaled, the function will return an -// *HTTPResponseError holding the response from the request. -func ErrorUnmarshaler(template error) func(*http.Response) error { - t := reflect.TypeOf(template) - if t.Kind() != reflect.Ptr { - panic(errgo.Newf("cannot unmarshal errors into value of type %T", template)) - } - t = t.Elem() - return func(resp *http.Response) error { - if 300 <= resp.StatusCode && resp.StatusCode < 400 { - // It's a redirection error. 
- loc, _ := resp.Location() - return newDecodeResponseError(resp, nil, fmt.Errorf("unexpected redirect (status %s) from %q to %q", resp.Status, resp.Request.URL, loc)) - } - errv := reflect.New(t) - if err := UnmarshalJSONResponse(resp, errv.Interface()); err != nil { - return errgo.NoteMask(err, fmt.Sprintf("cannot unmarshal error response (status %s)", resp.Status), isDecodeResponseError) - } - return errv.Interface().(error) - } -} - -// UnmarshalJSONResponse unmarshals the given HTTP response -// into x, which should be a pointer to the result to be -// unmarshaled into. -// -// If the response cannot be unmarshaled, an error of type -// *DecodeResponseError will be returned. -func UnmarshalJSONResponse(resp *http.Response, x interface{}) error { - if x == nil { - return nil - } - if !isJSONMediaType(resp.Header) { - fancyErr := newFancyDecodeError(resp.Header, resp.Body) - return newDecodeResponseError(resp, fancyErr.body, fancyErr) - } - // Read enough data that we can produce a plausible-looking - // possibly-truncated response body in the error. - var buf bytes.Buffer - n, err := io.Copy(&buf, io.LimitReader(resp.Body, int64(maxErrorBodySize))) - - bodyData := buf.Bytes() - if err != nil { - return newDecodeResponseError(resp, bodyData, errgo.Notef(err, "error reading response body")) - } - if n < int64(maxErrorBodySize) { - // We've read all the data; unmarshal it. - if err := json.Unmarshal(bodyData, x); err != nil { - return newDecodeResponseError(resp, bodyData, err) - } - return nil - } - // The response is longer than maxErrorBodySize; stitch the read - // bytes together with the body so that we can still read - // bodies larger than maxErrorBodySize. - dec := json.NewDecoder(io.MultiReader(&buf, resp.Body)) - - // Try to read all the body so that we can reuse the - // connection, but don't try *too* hard. Note that the - // usual number of additional bytes is 1 (a single newline - // after the JSON). 
- defer io.Copy(ioutil.Discard, io.LimitReader(resp.Body, 8*1024)) - - if err := dec.Decode(x); err != nil { - return newDecodeResponseError(resp, bodyData, err) - } - return nil -} - -// appendURL returns the result of combining the -// given base URL and relative URL. -// -// The path of the relative URL will be appended -// to the base URL, separated by a slash (/) if -// needed. -// -// Any query parameters will be concatenated together. -// -// appendURL will return an error if relURLStr contains -// a host name. -func appendURL(baseURLStr, relURLStr string) (*url.URL, error) { - b, err := url.Parse(baseURLStr) - if err != nil { - return nil, errgo.Notef(err, "cannot parse %q", baseURLStr) - } - r, err := url.Parse(relURLStr) - if err != nil { - return nil, errgo.Notef(err, "cannot parse %q", relURLStr) - } - if r.Host != "" { - return nil, errgo.Newf("relative URL specifies a host") - } - if r.Path != "" { - b.Path = strings.TrimSuffix(b.Path, "/") + "/" + strings.TrimPrefix(r.Path, "/") - } - if r.RawQuery != "" { - if b.RawQuery != "" { - b.RawQuery += "&" + r.RawQuery - } else { - b.RawQuery = r.RawQuery - } - } - return b, nil -} - -func urlError(err error, req *http.Request) error { - _, ok := errgo.Cause(err).(*url.Error) - if ok { - // The error is already sufficiently annotated. - return err - } - // Convert the method to mostly lower case to match net/http's behaviour - // so we don't get silly divergence of messages. 
- method := req.Method[:1] + strings.ToLower(req.Method[1:]) - return errgo.NoteMask(err, fmt.Sprintf("%s %s", method, req.URL), errgo.Any) -} diff --git a/vendor/gopkg.in/httprequest.v1/error.go b/vendor/gopkg.in/httprequest.v1/error.go deleted file mode 100644 index 44c2f803..00000000 --- a/vendor/gopkg.in/httprequest.v1/error.go +++ /dev/null @@ -1,121 +0,0 @@ -package httprequest - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - errgo "gopkg.in/errgo.v1" -) - -// These constants are recognized by DefaultErrorMapper -// as mapping to the similarly named HTTP status codes. -const ( - CodeBadRequest = "bad request" - CodeUnauthorized = "unauthorized" - CodeForbidden = "forbidden" - CodeNotFound = "not found" -) - -// DefaultErrorUnmarshaler is the default error unmarshaler -// used by Client. -var DefaultErrorUnmarshaler = ErrorUnmarshaler(new(RemoteError)) - -// DefaultErrorMapper is used by Server when ErrorMapper is nil. It maps -// all errors to RemoteError instances; if an error implements the -// ErrorCoder interface, the Code field will be set accordingly; some -// codes will map to specific HTTP status codes (for example, if -// ErrorCode returns CodeBadRequest, the resulting HTTP status will be -// http.StatusBadRequest). -var DefaultErrorMapper = defaultErrorMapper - -func defaultErrorMapper(ctx context.Context, err error) (status int, body interface{}) { - errorBody := errorResponseBody(err) - switch errorBody.Code { - case CodeBadRequest: - status = http.StatusBadRequest - case CodeUnauthorized: - status = http.StatusUnauthorized - case CodeForbidden: - status = http.StatusForbidden - case CodeNotFound: - status = http.StatusNotFound - default: - status = http.StatusInternalServerError - } - return status, errorBody -} - -// errorResponse returns an appropriate error -// response for the provided error. 
-func errorResponseBody(err error) *RemoteError { - var errResp RemoteError - cause := errgo.Cause(err) - if cause, ok := cause.(*RemoteError); ok { - // It's a RemoteError already; Preserve the wrapped - // error message but copy everything else. - errResp = *cause - errResp.Message = err.Error() - return &errResp - } - - // It's not a RemoteError. Preserve as much info as we can find. - errResp.Message = err.Error() - if coder, ok := cause.(ErrorCoder); ok { - errResp.Code = coder.ErrorCode() - } - return &errResp -} - -// ErrorCoder may be implemented by an error to cause -// it to return a particular RemoteError code when -// DefaultErrorMapper is used. -type ErrorCoder interface { - ErrorCode() string -} - -// RemoteError holds the default type of a remote error -// used by Client when no custom error unmarshaler -// is set. This type is also used by DefaultErrorMapper -// to marshal errors in Server. -type RemoteError struct { - // Message holds the error message. - Message string - - // Code may hold a code that classifies the error. - Code string `json:",omitempty"` - - // Info holds any other information associated with the error. - Info *json.RawMessage `json:",omitempty"` -} - -// Error implements the error interface. -func (e *RemoteError) Error() string { - if e.Message == "" { - return "httprequest: no error message found" - } - return e.Message -} - -// ErrorCode implements ErrorCoder by returning e.Code. -func (e *RemoteError) ErrorCode() string { - return e.Code -} - -// Errorf returns a new RemoteError instance that uses the -// given code and formats the message with fmt.Sprintf(f, a...). -// If f is empty and there are no other arguments, code will also -// be used for the message. -func Errorf(code string, f string, a ...interface{}) *RemoteError { - var msg string - if f == "" && len(a) == 0 { - msg = code - } else { - msg = fmt.Sprintf(f, a...) 
- } - return &RemoteError{ - Code: code, - Message: msg, - } -} diff --git a/vendor/gopkg.in/httprequest.v1/fancyerror.go b/vendor/gopkg.in/httprequest.v1/fancyerror.go deleted file mode 100644 index c553d563..00000000 --- a/vendor/gopkg.in/httprequest.v1/fancyerror.go +++ /dev/null @@ -1,277 +0,0 @@ -package httprequest - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "mime" - "net/http" - "strings" - "unicode" - - "golang.org/x/net/html" - "golang.org/x/net/html/atom" - "gopkg.in/errgo.v1" -) - -func isDecodeResponseError(err error) bool { - _, ok := err.(*DecodeResponseError) - return ok -} - -// DecodeResponseError represents an error when an HTTP -// response could not be decoded. -type DecodeResponseError struct { - // Response holds the problematic HTTP response. - // The body of this does not need to be closed - // and may be truncated if the response is large. - Response *http.Response - - // DecodeError holds the error that was encountered - // when decoding. - DecodeError error -} - -func (e *DecodeResponseError) Error() string { - return e.DecodeError.Error() -} - -// newDecodeResponseError returns a new DecodeResponseError that -// uses the given error for its message. The Response field -// holds a copy of req. If bodyData is non-nil, it -// will be used as the data in the Response.Body field; -// otherwise body data will be read from req.Body. -func newDecodeResponseError(resp *http.Response, bodyData []byte, err error) *DecodeResponseError { - if bodyData == nil { - bodyData = readBodyForError(resp.Body) - } - resp1 := *resp - resp1.Body = ioutil.NopCloser(bytes.NewReader(bodyData)) - - return &DecodeResponseError{ - Response: &resp1, - DecodeError: errgo.Mask(err, errgo.Any), - } -} - -// newDecodeRequestError returns a new DecodeRequestError that -// uses the given error for its message. The Request field -// holds a copy of req. 
If bodyData is non-nil, it -// will be used as the data in the Request.Body field; -// otherwise body data will be read from req.Body. -func newDecodeRequestError(req *http.Request, bodyData []byte, err error) *DecodeRequestError { - if bodyData == nil { - bodyData = readBodyForError(req.Body) - } - req1 := *req - req1.Body = ioutil.NopCloser(bytes.NewReader(bodyData)) - - return &DecodeRequestError{ - Request: &req1, - DecodeError: errgo.Mask(err, errgo.Any), - } -} - -// DecodeRequestError represents an error when an HTTP -// request could not be decoded. -type DecodeRequestError struct { - // Request holds the problematic HTTP request. - // The body of this does not need to be closed - // and may be truncated if the response is large. - Request *http.Request - - // DecodeError holds the error that was encountered - // when decoding. - DecodeError error -} - -func (e *DecodeRequestError) Error() string { - return e.DecodeError.Error() -} - -// fancyDecodeError is an error type that tries to -// produce a nice error message when the content -// type of a request or response is wrong. -type fancyDecodeError struct { - // contentType holds the contentType of the request or response. - contentType string - - // body holds up to maxErrorBodySize saved bytes of the - // request or response body. - body []byte -} - -func newFancyDecodeError(h http.Header, body io.Reader) *fancyDecodeError { - return &fancyDecodeError{ - contentType: h.Get("Content-Type"), - body: readBodyForError(body), - } -} - -func readBodyForError(r io.Reader) []byte { - data, _ := ioutil.ReadAll(io.LimitReader(noErrorReader{r}, int64(maxErrorBodySize))) - return data -} - -// maxErrorBodySize holds the maximum amount of body that -// we try to read for an error before extracting text from it. -// It's reasonably large because: -// a) HTML often has large embedded scripts which we want -// to skip and -// b) it should be an relatively unusual case so the size -// shouldn't harm. 
-// -// It's defined as a variable so that it can be redefined in tests. -var maxErrorBodySize = 200 * 1024 - -// isJSONMediaType reports whether the content type of the given header implies -// that the content is JSON. -func isJSONMediaType(header http.Header) bool { - contentType := header.Get("Content-Type") - mediaType, _, _ := mime.ParseMediaType(contentType) - m := strings.TrimPrefix(mediaType, "application/") - if len(m) == len(mediaType) { - return false - } - // Look for +json suffix. See https://tools.ietf.org/html/rfc6838#section-4.2.8 - // We recognize multiple suffixes too (e.g. application/something+json+other) - // as that seems to be a possibility. - for { - i := strings.Index(m, "+") - if i == -1 { - return m == "json" - } - if m[0:i] == "json" { - return true - } - m = m[i+1:] - } -} - -// Error implements error.Error by trying to produce a decent -// error message derived from the body content. -func (e *fancyDecodeError) Error() string { - mediaType, _, err := mime.ParseMediaType(e.contentType) - if err != nil { - // Even if there's no media type, we want to see something useful. - mediaType = fmt.Sprintf("%q", e.contentType) - } - - // TODO use charset.NewReader to convert from non-utf8 content? - switch mediaType { - case "text/html": - text, err := htmlToText(bytes.NewReader(e.body)) - if err != nil { - // Note: it seems that this can never actually - // happen - the only way that the HTML parser - // can fail is if there's a read error and we've - // removed that possibility by using - // noErrorReader above. 
- return fmt.Sprintf("unexpected (and invalid) content text/html; want application/json; content: %q", sizeLimit(e.body)) - } - if len(text) == 0 { - return fmt.Sprintf(`unexpected content type text/html; want application/json; content: %q`, sizeLimit(e.body)) - } - return fmt.Sprintf(`unexpected content type text/html; want application/json; content: %s`, sizeLimit(text)) - case "text/plain": - return fmt.Sprintf(`unexpected content type text/plain; want application/json; content: %s`, sizeLimit(sanitizeText(string(e.body), true))) - default: - return fmt.Sprintf(`unexpected content type %s; want application/json; content: %q`, mediaType, sizeLimit(e.body)) - } -} - -// noErrorReader wraps a reader, turning any errors into io.EOF -// so that we can extract some content even if we get an io error. -type noErrorReader struct { - r io.Reader -} - -func (r noErrorReader) Read(buf []byte) (int, error) { - n, err := r.r.Read(buf) - if err != nil { - err = io.EOF - } - return n, err -} - -func sizeLimit(data []byte) []byte { - const max = 1024 - if len(data) < max { - return data - } - return append(data[0:max], fmt.Sprintf(" ... [%d bytes omitted]", len(data)-max)...) -} - -// htmlToText attempts to return some relevant textual content -// from the HTML content in the given reader, formatted -// as a single line. -func htmlToText(r io.Reader) ([]byte, error) { - n, err := html.Parse(r) - if err != nil { - return nil, err - } - var buf bytes.Buffer - htmlNodeToText(&buf, n) - return buf.Bytes(), nil -} - -// htmlNodeToText tries to extract some text from an arbitrary HTML -// page. It doesn't try to avoid looking in the header, because the -// title is in the header and is often the most succinct description of -// the page. 
-func htmlNodeToText(w *bytes.Buffer, n *html.Node) { - for ; n != nil; n = n.NextSibling { - switch n.Type { - case html.TextNode: - data := sanitizeText(n.Data, false) - if len(data) == 0 { - break - } - if w.Len() > 0 { - w.WriteString("; ") - } - w.Write(data) - case html.ElementNode: - if n.DataAtom != atom.Script { - htmlNodeToText(w, n.FirstChild) - } - case html.DocumentNode: - htmlNodeToText(w, n.FirstChild) - } - } -} - -// sanitizeText tries to make the given string easier to read when presented -// as a single line. It squashes each run of white space into a single -// space, trims leading and trailing white space and trailing full -// stops. If newlineSemi is true, any newlines will be replaced with a -// semicolon. -func sanitizeText(s string, newlineSemi bool) []byte { - out := make([]byte, 0, len(s)) - prevWhite := false - for _, r := range s { - if newlineSemi && r == '\n' && len(out) > 0 { - out = append(out, ';') - prevWhite = true - continue - } - if unicode.IsSpace(r) { - if len(out) > 0 { - prevWhite = true - } - continue - } - if prevWhite { - out = append(out, ' ') - prevWhite = false - } - out = append(out, string(r)...) - } - // Remove final space, any full stops and any final semicolon - // we might have added. - out = bytes.TrimRightFunc(out, func(r rune) bool { - return r == '.' || r == ' ' || r == ';' - }) - return out -} diff --git a/vendor/gopkg.in/httprequest.v1/handler.go b/vendor/gopkg.in/httprequest.v1/handler.go deleted file mode 100644 index cf74a588..00000000 --- a/vendor/gopkg.in/httprequest.v1/handler.go +++ /dev/null @@ -1,669 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. 
- -package httprequest - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "reflect" - - "github.com/julienschmidt/httprouter" - errgo "gopkg.in/errgo.v1" -) - -// Server represents the server side of an HTTP servers, and can be -// used to create HTTP handlers although it is not an HTTP handler -// itself. -type Server struct { - // ErrorMapper holds a function that can convert a Go error - // into a form that can be returned as a JSON body from an HTTP request. - // - // The httpStatus value reports the desired HTTP status. - // - // If the returned errorBody implements HeaderSetter, then - // that method will be called to add custom headers to the request. - // - // If this both this and ErrorWriter are nil, DefaultErrorMapper will be used. - ErrorMapper func(ctxt context.Context, err error) (httpStatus int, errorBody interface{}) - - // ErrorWriter is a more general form of ErrorMapper. If this - // field is set, ErrorMapper will be ignored and any returned - // errors will be passed to ErrorWriter, which should use - // w to set the HTTP status and write an appropriate - // error response. - ErrorWriter func(ctx context.Context, w http.ResponseWriter, err error) -} - -// Handler defines a HTTP handler that will handle the -// given HTTP method at the given httprouter path -type Handler struct { - Method string - Path string - Handle httprouter.Handle -} - -// handlerFunc represents a function that can handle an HTTP request. -type handlerFunc struct { - // unmarshal unmarshals the request parameters into - // the argument value required by the method. - unmarshal func(p Params) (reflect.Value, error) - - // call invokes the request on the given function value with the - // given argument value (as returned by unmarshal). - call func(fv, argv reflect.Value, p Params) - - // method holds the HTTP method the function will be - // registered for. - method string - - // pathPattern holds the path pattern the function will - // be registered for. 
- pathPattern string -} - -var ( - paramsType = reflect.TypeOf(Params{}) - errorType = reflect.TypeOf((*error)(nil)).Elem() - contextType = reflect.TypeOf((*context.Context)(nil)).Elem() - httpResponseWriterType = reflect.TypeOf((*http.ResponseWriter)(nil)).Elem() - httpHeaderType = reflect.TypeOf(http.Header(nil)) - httpRequestType = reflect.TypeOf((*http.Request)(nil)) - ioCloserType = reflect.TypeOf((*io.Closer)(nil)).Elem() -) - -// AddHandlers adds all the handlers in the given slice to r. -func AddHandlers(r *httprouter.Router, hs []Handler) { - for _, h := range hs { - r.Handle(h.Method, h.Path, h.Handle) - } -} - -// Handle converts a function into a Handler. The argument f -// must be a function of one of the following six forms, where ArgT -// must be a struct type acceptable to Unmarshal and ResultT is a type -// that can be marshaled as JSON: -// -// func(p Params, arg *ArgT) -// func(p Params, arg *ArgT) error -// func(p Params, arg *ArgT) (ResultT, error) -// -// func(arg *ArgT) -// func(arg *ArgT) error -// func(arg *ArgT) (ResultT, error) -// -// When processing a call to the returned handler, the provided -// parameters are unmarshaled into a new ArgT value using Unmarshal, -// then f is called with this value. If the unmarshaling fails, f will -// not be called and the unmarshal error will be written as a JSON -// response. -// -// As an additional special case to the rules defined in Unmarshal, the -// tag on an anonymous field of type Route specifies the method and path -// to use in the HTTP request. It should hold two space-separated -// fields; the first specifies the HTTP method, the second the URL path -// to use for the request. If this is given, the returned handler will -// hold that method and path, otherwise they will be empty. -// -// If an error is returned from f, it is passed through the error mapper -// before writing as a JSON response. 
-// -// In the third form, when no error is returned, the result is written -// as a JSON response with status http.StatusOK. Also in this case, any -// calls to Params.Response.Write or Params.Response.WriteHeader will be -// ignored, as the response code and data should be defined entirely by -// the returned result and error. -// -// Handle will panic if the provided function is not in one of the above -// forms. -func (srv *Server) Handle(f interface{}) Handler { - fv := reflect.ValueOf(f) - hf, err := srv.handlerFunc(fv.Type(), nil) - if err != nil { - panic(errgo.Notef(err, "bad handler function")) - } - return Handler{ - Method: hf.method, - Path: hf.pathPattern, - Handle: func(w http.ResponseWriter, req *http.Request, p httprouter.Params) { - ctx := req.Context() - p1 := Params{ - Response: w, - Request: req, - PathVar: p, - PathPattern: hf.pathPattern, - Context: ctx, - } - argv, err := hf.unmarshal(p1) - if err != nil { - srv.WriteError(ctx, w, err) - return - } - hf.call(fv, argv, p1) - }, - } -} - -// Handlers returns a list of handlers that will be handled by the value -// returned by the given argument, which must be a function in one of the -// following forms: -// -// func(p httprequest.Params) (T, context.Context, error) -// func(p httprequest.Params, handlerArg I) (T, context.Context, error) -// -// for some type T and some interface type I. Each exported method defined on T defines a handler, -// and should be in one of the forms accepted by Server.Handle -// with the additional constraint that the argument to each -// of the handlers must be compatible with the type I when the -// second form is used above. -// -// The returned context will be used as the value of Params.Context -// when Params is passed to any method. It will also be used -// when writing an error if the function returns an error. -// -// Handlers will panic if f is not of the required form, no methods are -// defined on T or any method defined on T is not suitable for Handle. 
-// -// When any of the returned handlers is invoked, f will be called and -// then the appropriate method will be called on the value it returns. -// If specified, the handlerArg parameter to f will hold the ArgT argument that -// will be passed to the handler method. -// -// If T implements io.Closer, its Close method will be called -// after the request is completed. -func (srv *Server) Handlers(f interface{}) []Handler { - rootv := reflect.ValueOf(f) - wt, argInterfacet, err := checkHandlersWrapperFunc(rootv) - if err != nil { - panic(errgo.Notef(err, "bad handler function")) - } - hasClose := wt.Implements(ioCloserType) - hs := make([]Handler, 0, wt.NumMethod()) - for i := 0; i < wt.NumMethod(); i++ { - i := i - m := wt.Method(i) - if m.PkgPath != "" { - continue - } - if m.Name == "Close" { - if !hasClose { - panic(errgo.Newf("bad type for Close method (got %v want func(%v) error", m.Type, wt)) - } - continue - } - if wt.Kind() != reflect.Interface { - // The type in the Method struct includes the receiver type, - // which we don't want to look at (and we won't see when - // we get the method from the actual value at dispatch time), - // so we hide it. 
- m.Type = withoutReceiver(m.Type) - } - h, err := srv.methodHandler(m, rootv, argInterfacet, hasClose) - if err != nil { - panic(err) - } - hs = append(hs, h) - } - if len(hs) == 0 { - panic(errgo.Newf("no exported methods defined on %s", wt)) - } - return hs -} - -func (srv *Server) methodHandler(m reflect.Method, rootv reflect.Value, argInterfacet reflect.Type, hasClose bool) (Handler, error) { - hf, err := srv.handlerFunc(m.Type, argInterfacet) - if err != nil { - return Handler{}, errgo.Notef(err, "bad type for method %s", m.Name) - } - if hf.method == "" || hf.pathPattern == "" { - return Handler{}, errgo.Notef(err, "method %s does not specify route method and path", m.Name) - } - handler := func(w http.ResponseWriter, req *http.Request, p httprouter.Params) { - ctx := req.Context() - p1 := Params{ - Response: w, - Request: req, - PathVar: p, - PathPattern: hf.pathPattern, - Context: ctx, - } - inv, err := hf.unmarshal(p1) - if err != nil { - srv.WriteError(ctx, w, err) - return - } - var outv []reflect.Value - if argInterfacet != nil { - outv = rootv.Call([]reflect.Value{ - reflect.ValueOf(p1), - // Pass the value to the root function so it can do wrappy things with it. - // Note that because of the checks we've applied earlier, we can be - // sure that the value will implement the interface type of this argument. - inv, - }) - } else { - outv = rootv.Call([]reflect.Value{ - reflect.ValueOf(p1), - }) - } - tv, ctxv, errv := outv[0], outv[1], outv[2] - // Get the context value robustly even if the - // handler stupidly decides to return nil, and fall - // back to the original context if it does. 
- ctx1, _ := ctxv.Interface().(context.Context) - if ctx1 != nil { - ctx = ctx1 - } - if !errv.IsNil() { - srv.WriteError(ctx, w, errv.Interface().(error)) - return - } - if hasClose { - defer tv.Interface().(io.Closer).Close() - } - hf.call(tv.Method(m.Index), inv, Params{ - Response: w, - Request: req, - PathVar: p, - PathPattern: hf.pathPattern, - Context: ctx, - }) - } - return Handler{ - Method: hf.method, - Path: hf.pathPattern, - Handle: handler, - }, nil -} - -func checkHandlersWrapperFunc(fv reflect.Value) (returnt, argInterfacet reflect.Type, err error) { - ft := fv.Type() - if ft.Kind() != reflect.Func { - return nil, nil, errgo.Newf("expected function, got %v", ft) - } - if fv.IsNil() { - return nil, nil, errgo.Newf("function is nil") - } - if n := ft.NumIn(); n != 1 && n != 2 { - return nil, nil, errgo.Newf("got %d arguments, want 1 or 2", n) - } - if n := ft.NumOut(); n != 3 { - return nil, nil, errgo.Newf("function returns %d values, want (<T>, context.Context, error)", n) - } - if t := ft.In(0); t != paramsType { - return nil, nil, errgo.Newf("invalid first argument, want httprequest.Params, got %v", t) - } - if ft.NumIn() > 1 { - if t := ft.In(1); t.Kind() != reflect.Interface { - return nil, nil, errgo.Newf("invalid second argument, want interface type, got %v", t) - } - argInterfacet = ft.In(1) - } - if t := ft.Out(1); !t.Implements(contextType) { - return nil, nil, errgo.Newf("second return parameter of type %v does not implement context.Context", t) - } - if t := ft.Out(2); t != errorType { - return nil, nil, errgo.Newf("invalid third return parameter, want error, got %v", t) - } - return ft.Out(0), argInterfacet, nil -} - -func checkHandleType(t, argInterfacet reflect.Type) (*requestType, error) { - if t.Kind() != reflect.Func { - return nil, errgo.New("not a function") - } - if n := t.NumIn(); n != 1 && n != 2 { - return nil, errgo.Newf("has %d parameters, need 1 or 2", t.NumIn()) - } - if t.NumOut() > 2 { - return nil, errgo.Newf("has %d 
result parameters, need 0, 1 or 2", t.NumOut()) - } - if t.NumIn() == 2 { - if t.In(0) != paramsType { - return nil, errgo.Newf("first argument is %v, need httprequest.Params", t.In(0)) - } - } else { - if t.In(0) == paramsType { - return nil, errgo.Newf("no argument parameter after Params argument") - } - } - argt := t.In(t.NumIn() - 1) - pt, err := getRequestType(argt) - if err != nil { - return nil, errgo.Notef(err, "last argument cannot be used for Unmarshal") - } - if argInterfacet != nil && !argt.Implements(argInterfacet) { - return nil, errgo.Notef(err, "argument of type %v does not implement interface required by root handler %v", argt, argInterfacet) - } - if t.NumOut() > 0 { - // func(p Params, arg *ArgT) error - // func(p Params, arg *ArgT) (ResultT, error) - if et := t.Out(t.NumOut() - 1); et != errorType { - return nil, errgo.Newf("final result parameter is %s, need error", et) - } - } - return pt, nil -} - -// handlerFunc returns a function that will call a function of the given type, -// unmarshaling request parameters and marshaling the response as -// appropriate. 
-func (srv *Server) handlerFunc(ft, argInterfacet reflect.Type) (handlerFunc, error) { - rt, err := checkHandleType(ft, argInterfacet) - if err != nil { - return handlerFunc{}, errgo.Mask(err) - } - return handlerFunc{ - unmarshal: handlerUnmarshaler(ft, rt), - call: srv.handlerCaller(ft, rt), - method: rt.method, - pathPattern: rt.path, - }, nil -} - -func handlerUnmarshaler( - ft reflect.Type, - rt *requestType, -) func(p Params) (reflect.Value, error) { - argStructType := ft.In(ft.NumIn() - 1).Elem() - return func(p Params) (reflect.Value, error) { - if err := p.Request.ParseForm(); err != nil { - return reflect.Value{}, errgo.WithCausef(err, ErrUnmarshal, "cannot parse HTTP request form") - } - argv := reflect.New(argStructType) - if err := unmarshal(p, argv, rt); err != nil { - return reflect.Value{}, errgo.NoteMask(err, "cannot unmarshal parameters", errgo.Is(ErrUnmarshal)) - } - return argv, nil - } -} - -func (srv *Server) handlerCaller( - ft reflect.Type, - rt *requestType, -) func(fv, argv reflect.Value, p Params) { - returnJSON := ft.NumOut() > 1 - needsParams := ft.In(0) == paramsType - respond := srv.handlerResponder(ft) - return func(fv, argv reflect.Value, p Params) { - var rv []reflect.Value - if needsParams { - p := p - if returnJSON { - p.Response = headerOnlyResponseWriter{p.Response.Header()} - } - rv = fv.Call([]reflect.Value{ - reflect.ValueOf(p), - argv, - }) - } else { - rv = fv.Call([]reflect.Value{ - argv, - }) - } - respond(p, rv) - } -} - -// handlerResponder handles the marshaling of the result values from the call to a function -// of type ft. The returned function accepts the values returned by the handler. -func (srv *Server) handlerResponder(ft reflect.Type) func(p Params, outv []reflect.Value) { - switch ft.NumOut() { - case 0: - // func(...) - return func(Params, []reflect.Value) {} - case 1: - // func(...) 
error - return func(p Params, outv []reflect.Value) { - if err := outv[0].Interface(); err != nil { - srv.WriteError(p.Context, p.Response, err.(error)) - } - } - case 2: - // func(...) (ResultT, error) - return func(p Params, outv []reflect.Value) { - if err := outv[1].Interface(); err != nil { - srv.WriteError(p.Context, p.Response, err.(error)) - return - } - if err := WriteJSON(p.Response, http.StatusOK, outv[0].Interface()); err != nil { - srv.WriteError(p.Context, p.Response, err) - } - } - default: - panic("unreachable") - } -} - -// ToHTTP converts an httprouter.Handle into an http.Handler. -// It will pass any path variables found in the request context -// through to h. -func ToHTTP(h httprouter.Handle) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - h(w, req, httprouter.ParamsFromContext(req.Context())) - }) -} - -// JSONHandler is like httprouter.Handle except that it returns a -// body (to be converted to JSON) and an error. -// The Header parameter can be used to set -// custom headers on the response. -type JSONHandler func(Params) (interface{}, error) - -// ErrorHandler is like httprouter.Handle except it returns an error -// which may be returned as the error body of the response. -// An ErrorHandler function should not itself write to the ResponseWriter -// if it returns an error. -type ErrorHandler func(Params) error - -// HandleJSON returns a handler that writes the return value of handle -// as a JSON response. If handle returns an error, it is passed through -// the error mapper. -// -// Note that the Params argument passed to handle will not -// have its PathPattern set as that information is not available. 
-func (srv *Server) HandleJSON(handle JSONHandler) httprouter.Handle { - return func(w http.ResponseWriter, req *http.Request, p httprouter.Params) { - ctx := req.Context() - val, err := handle(Params{ - Response: headerOnlyResponseWriter{w.Header()}, - Request: req, - PathVar: p, - Context: ctx, - }) - if err == nil { - if err = WriteJSON(w, http.StatusOK, val); err == nil { - return - } - } - srv.WriteError(ctx, w, err) - } -} - -// HandleErrors returns a handler that passes any non-nil error returned -// by handle through the error mapper and writes it as a JSON response. -// -// Note that the Params argument passed to handle will not -// have its PathPattern set as that information is not available. -func (srv *Server) HandleErrors(handle ErrorHandler) httprouter.Handle { - return func(w http.ResponseWriter, req *http.Request, p httprouter.Params) { - w1 := responseWriter{ - ResponseWriter: w, - } - ctx := req.Context() - if err := handle(Params{ - Response: &w1, - Request: req, - PathVar: p, - Context: ctx, - }); err != nil { - if w1.headerWritten { - // The header has already been written, - // so we can't set the appropriate error - // response code and there's a danger - // that we may be corrupting the - // response by appending a JSON error - // message to it. - // TODO log an error in this case. - return - } - srv.WriteError(ctx, w, err) - } - } -} - -// WriteError writes an error to a ResponseWriter and sets the HTTP -// status code, using srv.ErrorMapper to determine the actually written -// response. -// -// It uses WriteJSON to write the error body returned from the -// ErrorMapper so it is possible to add custom headers to the HTTP error -// response by implementing HeaderSetter. 
-func (srv *Server) WriteError(ctx context.Context, w http.ResponseWriter, err error) { - if srv.ErrorWriter != nil { - srv.ErrorWriter(ctx, w, err) - return - } - errorMapper := srv.ErrorMapper - if errorMapper == nil { - errorMapper = DefaultErrorMapper - } - status, resp := errorMapper(ctx, err) - err1 := WriteJSON(w, status, resp) - if err1 == nil { - return - } - // TODO log an error ? - - // JSON-marshaling the original error failed, so try to send that - // error instead; if that fails, give up and go home. - status1, resp1 := errorMapper(ctx, errgo.Notef(err1, "cannot marshal error response %q", err)) - err2 := WriteJSON(w, status1, resp1) - if err2 == nil { - return - } - - w.WriteHeader(http.StatusInternalServerError) - w.Write([]byte(fmt.Sprintf("really cannot marshal error response %q: %v", err, err1))) -} - -// WriteJSON writes the given value to the ResponseWriter -// and sets the HTTP status to the given code. -// -// If val implements the HeaderSetter interface, the SetHeader -// method will be called to add additional headers to the -// HTTP response. It is called after the Content-Type header -// has been added, so can be used to override the content type -// if required. -func WriteJSON(w http.ResponseWriter, code int, val interface{}) error { - // TODO consider marshalling directly to w using json.NewEncoder. - // pro: this will not require a full buffer allocation. - // con: if there's an error after the first write, it will be lost. - data, err := json.Marshal(val) - if err != nil { - return errgo.Mask(err) - } - w.Header().Set("content-type", "application/json") - if headerSetter, ok := val.(HeaderSetter); ok { - headerSetter.SetHeader(w.Header()) - } - w.WriteHeader(code) - w.Write(data) - return nil -} - -// HeaderSetter is the interface checked for by WriteJSON. -// If implemented on a value passed to WriteJSON, the SetHeader -// method will be called to allow it to set custom headers -// on the response. 
-type HeaderSetter interface { - SetHeader(http.Header) -} - -// CustomHeader is a type that allows a JSON value to -// set custom HTTP headers associated with the -// HTTP response. -type CustomHeader struct { - // Body holds the JSON-marshaled body of the response. - Body interface{} - - // SetHeaderFunc holds a function that will be called - // to set any custom headers on the response. - SetHeaderFunc func(http.Header) -} - -// MarshalJSON implements json.Marshaler by marshaling -// h.Body. -func (h CustomHeader) MarshalJSON() ([]byte, error) { - return json.Marshal(h.Body) -} - -// SetHeader implements HeaderSetter by calling -// h.SetHeaderFunc. -func (h CustomHeader) SetHeader(header http.Header) { - h.SetHeaderFunc(header) -} - -// Ensure statically that responseWriter does implement http.Flusher. -var _ http.Flusher = (*responseWriter)(nil) - -// responseWriter wraps http.ResponseWriter but allows us -// to find out whether any body has already been written. -type responseWriter struct { - headerWritten bool - http.ResponseWriter -} - -func (w *responseWriter) Write(data []byte) (int, error) { - w.headerWritten = true - return w.ResponseWriter.Write(data) -} - -func (w *responseWriter) WriteHeader(code int) { - w.headerWritten = true - w.ResponseWriter.WriteHeader(code) -} - -// Flush implements http.Flusher.Flush. -func (w *responseWriter) Flush() { - w.headerWritten = true - if f, ok := w.ResponseWriter.(http.Flusher); ok { - f.Flush() - } -} - -type headerOnlyResponseWriter struct { - h http.Header -} - -func (w headerOnlyResponseWriter) Header() http.Header { - return w.h -} - -func (w headerOnlyResponseWriter) Write([]byte) (int, error) { - // TODO log or panic when this happens? - return 0, errgo.New("inappropriate call to ResponseWriter.Write in JSON-returning handler") -} - -func (w headerOnlyResponseWriter) WriteHeader(code int) { - // TODO log or panic when this happens? 
-} - -func withoutReceiver(t reflect.Type) reflect.Type { - return withoutReceiverType{t} -} - -type withoutReceiverType struct { - reflect.Type -} - -func (t withoutReceiverType) NumIn() int { - return t.Type.NumIn() - 1 -} - -func (t withoutReceiverType) In(i int) reflect.Type { - return t.Type.In(i + 1) -} diff --git a/vendor/gopkg.in/httprequest.v1/marshal.go b/vendor/gopkg.in/httprequest.v1/marshal.go deleted file mode 100644 index 9a884bbd..00000000 --- a/vendor/gopkg.in/httprequest.v1/marshal.go +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package httprequest - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "reflect" - "strings" - - "github.com/julienschmidt/httprouter" - "gopkg.in/errgo.v1" -) - -// Marshal is the counterpart of Unmarshal. It takes information from -// x, which must be a pointer to a struct, and returns an HTTP request -// using the given method that holds all of the information. -// -// The Body field in the returned request will always be of type -// BytesReaderCloser. -// -// If x implements the HeaderSetter interface, its SetHeader method will -// be called to add additional headers to the HTTP request after it has -// been marshaled. If x is pointer to a CustomHeader object then Marshal will use -// its Body member to create the HTTP request. -// -// The HTTP request will use the given method. Named fields in the given -// baseURL will be filled out from "path"-tagged fields in x to form the -// URL path in the returned request. These are specified as for httprouter. -// -// If a field in baseURL is a suffix of the form "*var" (a trailing wildcard element -// that holds the rest of the path), the marshaled string must begin with a "/". -// This matches the httprouter convention that it always returns such fields -// with a "/" prefix. 
-// -// If a field is of type string or []string, the value of the field will -// be used directly; otherwise if implements encoding.TextMarshaler, that -// will be used to marshal the field, otherwise fmt.Sprint will be used. -// -// An "omitempty" attribute on a form or header field specifies that -// if the form or header value is zero, the form or header entry -// will be omitted. If the field is a nil pointer, it will be omitted; -// otherwise if the field type implements IsZeroer, that method -// will be used to determine whether the value is zero, otherwise -// if the value is comparable, it will be compared with the zero -// value for its type, otherwise the value will never be omitted. -// One notable implementation of IsZeroer is time.Time. -// -// An "inbody" attribute on a form field specifies that the field will -// be marshaled as part of an application/x-www-form-urlencoded body. -// Note that the field may still be unmarshaled from either a URL query -// parameter or a form-encoded body. -// -// For example, this code: -// -// type UserDetails struct { -// Age int -// } -// -// type Test struct { -// Username string `httprequest:"user,path"` -// ContextId int64 `httprequest:"context,form"` -// Extra string `httprequest:"context,form,omitempty"` -// Details UserDetails `httprequest:",body"` -// } -// req, err := Marshal("http://example.com/users/:user/details", "GET", &Test{ -// Username: "bob", -// ContextId: 1234, -// Details: UserDetails{ -// Age: 36, -// } -// }) -// if err != nil { -// ... -// } -// -// will produce an HTTP request req with a URL of -// http://example.com/users/bob/details?context=1234 and a JSON-encoded -// body holding `{"Age":36}`. -// -// It is an error if there is a field specified in the URL that is not -// found in x. 
-func Marshal(baseURL, method string, x interface{}) (*http.Request, error) { - var xv reflect.Value - if ch, ok := x.(*CustomHeader); ok { - xv = reflect.ValueOf(ch.Body) - } else { - xv = reflect.ValueOf(x) - } - pt, err := getRequestType(xv.Type()) - if err != nil { - return nil, errgo.WithCausef(err, ErrBadUnmarshalType, "bad type %s", xv.Type()) - } - req, err := http.NewRequest(method, baseURL, BytesReaderCloser{bytes.NewReader(nil)}) - if err != nil { - return nil, errgo.Mask(err) - } - req.GetBody = func() (io.ReadCloser, error) { return BytesReaderCloser{bytes.NewReader(nil)}, nil } - req.Form = url.Values{} - if pt.formBody { - // Use req.PostForm as a place to put the values that - // will be marshaled as part of the form body. - // It's ignored by http.Client, but that's OK because - // we'll make the body ourselves later. - req.PostForm = url.Values{} - } - p := &Params{ - Request: req, - } - if err := marshal(p, xv, pt); err != nil { - return nil, errgo.Mask(err, errgo.Is(ErrUnmarshal)) - } - if pt.formBody { - data := []byte(req.PostForm.Encode()) - p.Request.Body = BytesReaderCloser{bytes.NewReader(data)} - p.Request.GetBody = func() (io.ReadCloser, error) { return BytesReaderCloser{bytes.NewReader(data)}, nil } - p.Request.ContentLength = int64(len(data)) - p.Request.Header.Set("Content-Type", "application/x-www-form-urlencoded") - p.Request.PostForm = nil - } - if headerSetter, ok := x.(HeaderSetter); ok { - headerSetter.SetHeader(p.Request.Header) - } - return p.Request, nil -} - -// marshal is the internal version of Marshal. -func marshal(p *Params, xv reflect.Value, pt *requestType) error { - xv = xv.Elem() - for _, f := range pt.fields { - fv := xv.FieldByIndex(f.index) - if f.isPointer { - if fv.IsNil() { - continue - } - fv = fv.Elem() - } - // TODO store the field name in the field so - // that we can produce a nice error message. 
- if err := f.marshal(fv, p); err != nil { - return errgo.WithCausef(err, ErrUnmarshal, "cannot marshal field") - } - } - path, err := buildPath(p.Request.URL.Path, p.PathVar) - if err != nil { - return errgo.Mask(err) - } - p.Request.URL.Path = path - if q := p.Request.Form.Encode(); q != "" && p.Request.URL.RawQuery != "" { - p.Request.URL.RawQuery += "&" + q - } else { - p.Request.URL.RawQuery += q - } - return nil -} - -func buildPath(path string, p httprouter.Params) (string, error) { - pathBytes := make([]byte, 0, len(path)*2) - for { - s, rest := nextPathSegment(path) - if s == "" { - break - } - if s[0] != ':' && s[0] != '*' { - pathBytes = append(pathBytes, s...) - path = rest - continue - } - if s[0] == '*' && rest != "" { - return "", errgo.New("star path parameter is not at end of path") - } - if len(s) == 1 { - return "", errgo.New("empty path parameter") - } - val := p.ByName(s[1:]) - if val == "" { - return "", errgo.Newf("missing value for path parameter %q", s[1:]) - } - if s[0] == '*' { - if !strings.HasPrefix(val, "/") { - return "", errgo.Newf("value %q for path parameter %q does not start with required /", val, s) - } - val = val[1:] - } - pathBytes = append(pathBytes, val...) - path = rest - } - return string(pathBytes), nil -} - -// nextPathSegment returns the next wildcard or constant -// segment of the given path and everything after that -// segment. -func nextPathSegment(s string) (string, string) { - if s == "" { - return "", "" - } - if s[0] == ':' || s[0] == '*' { - if i := strings.Index(s, "/"); i != -1 { - return s[0:i], s[i:] - } - return s, "" - } - if i := strings.IndexAny(s, ":*"); i != -1 { - return s[0:i], s[i:] - } - return s, "" -} - -// getMarshaler returns a marshaler function suitable for marshaling -// a field with the given tag into an HTTP request. 
-func getMarshaler(tag tag, t reflect.Type) (marshaler, error) { - switch { - case tag.source == sourceNone: - return marshalNop, nil - case tag.source == sourceBody: - return marshalBody, nil - case t == reflect.TypeOf([]string(nil)): - switch tag.source { - default: - return nil, errgo.New("invalid target type []string for path parameter") - case sourceForm: - return marshalAllForm(tag.name), nil - case sourceFormBody: - return marshalAllFormBody(tag.name), nil - case sourceHeader: - return marshalAllHeader(tag.name), nil - } - case t == reflect.TypeOf(""): - return marshalString(tag), nil - case implementsTextMarshaler(t): - return marshalWithMarshalText(t, tag), nil - default: - return marshalWithSprint(t, tag), nil - } -} - -// marshalNop does nothing with the value. -func marshalNop(v reflect.Value, p *Params) error { - return nil -} - -// marshalBody marshals the specified value into the body of the http request. -func marshalBody(v reflect.Value, p *Params) error { - // TODO allow body types that aren't necessarily JSON. - data, err := json.Marshal(v.Addr().Interface()) - if err != nil { - return errgo.Notef(err, "cannot marshal request body") - } - p.Request.Body = BytesReaderCloser{bytes.NewReader(data)} - p.Request.GetBody = func() (io.ReadCloser, error) { return BytesReaderCloser{bytes.NewReader(data)}, nil } - p.Request.ContentLength = int64(len(data)) - p.Request.Header.Set("Content-Type", "application/json") - return nil -} - -// marshalAllForm marshals a []string slice into form fields. -func marshalAllForm(name string) marshaler { - return func(v reflect.Value, p *Params) error { - if ss := v.Interface().([]string); len(ss) > 0 { - p.Request.Form[name] = ss - } - return nil - } -} - -// marshalAllFormBody marshals a []string slice into form body fields. 
-func marshalAllFormBody(name string) marshaler { - return func(v reflect.Value, p *Params) error { - if ss := v.Interface().([]string); len(ss) > 0 { - p.Request.PostForm[name] = ss - } - return nil - } -} - -// marshalAllHeader marshals a []string slice into a header. -func marshalAllHeader(name string) marshaler { - return func(v reflect.Value, p *Params) error { - if ss := v.Interface().([]string); len(ss) > 0 { - p.Request.Header[name] = ss - } - return nil - } -} - -// marshalString marshals s string field. -func marshalString(tag tag) marshaler { - formSet := formSetter(tag) - return func(v reflect.Value, p *Params) error { - s := v.String() - if tag.omitempty && s == "" { - return nil - } - formSet(tag.name, v.String(), p) - return nil - } -} - -// encodingTextMarshaler is the same as encoding.TextUnmarshaler -// but avoids us importing the encoding package, which some -// broken gccgo installations do not allow. -// TODO remove this and use encoding.TextMarshaler instead. -type encodingTextMarshaler interface { - MarshalText() (text []byte, err error) -} - -var textMarshalerType = reflect.TypeOf((*encodingTextMarshaler)(nil)).Elem() - -func implementsTextMarshaler(t reflect.Type) bool { - // Use the pointer type, because a pointer - // type will implement a superset of the methods - // of a non-pointer type. - return reflect.PtrTo(t).Implements(textMarshalerType) -} - -// marshalWithMarshalText returns a marshaler -// that marshals the given type from the given tag -// using its MarshalText method. 
-func marshalWithMarshalText(t reflect.Type, tag tag) marshaler { - formSet := formSetter(tag) - omit := omitter(t, tag) - return func(v reflect.Value, p *Params) error { - if omit(v) { - return nil - } - m := v.Addr().Interface().(encodingTextMarshaler) - data, err := m.MarshalText() - if err != nil { - return errgo.Mask(err) - } - formSet(tag.name, string(data), p) - return nil - } -} - -// IsZeroer is used when marshaling to determine if a value -// is zero (see Marshal). -type IsZeroer interface { - IsZero() bool -} - -var isZeroerType = reflect.TypeOf((*IsZeroer)(nil)).Elem() - -// omitter returns a function that determins if a value -// with the given type and tag should be omitted from -// marshal output. The value passed to the function -// will be the underlying value, not its address. -// -// It returns nil if the value should never be omitted. -func omitter(t reflect.Type, tag tag) func(reflect.Value) bool { - never := func(reflect.Value) bool { - return false - } - if !tag.omitempty { - return never - } - if reflect.PtrTo(t).Implements(isZeroerType) { - return func(v reflect.Value) bool { - return v.Addr().Interface().(IsZeroer).IsZero() - } - } - if t.Comparable() { - zeroVal := reflect.Zero(t).Interface() - return func(v reflect.Value) bool { - return v.Interface() == zeroVal - } - } - return never -} - -// marshalWithSprint returns an marshaler -// that unmarshals the given tag using fmt.Sprint. -func marshalWithSprint(t reflect.Type, tag tag) marshaler { - formSet := formSetter(tag) - omit := omitter(t, tag) - return func(v reflect.Value, p *Params) error { - if omit(v) { - return nil - } - formSet(tag.name, fmt.Sprint(v.Interface()), p) - return nil - } -} - -// formSetter returns a function that can set the value -// for a given tag. 
-func formSetter(t tag) func(name, value string, p *Params) { - formSet := formSetters[t.source] - if formSet == nil { - panic("unexpected source") - } - if !t.omitempty { - return formSet - } - return func(name, value string, p *Params) { - if value != "" { - formSet(name, value, p) - } - } -} - -// formSetters maps from source to a function that -// sets the value for a given key. -var formSetters = []func(string, string, *Params){ - sourceForm: func(name, value string, p *Params) { - p.Request.Form.Set(name, value) - }, - sourceFormBody: func(name, value string, p *Params) { - p.Request.PostForm.Set(name, value) - }, - sourcePath: func(name, value string, p *Params) { - p.PathVar = append(p.PathVar, httprouter.Param{Key: name, Value: value}) - }, - sourceBody: nil, - sourceHeader: func(name, value string, p *Params) { - p.Request.Header.Set(name, value) - }, -} - -// BytesReaderCloser is a bytes.Reader which -// implements io.Closer with a no-op Close method. -type BytesReaderCloser struct { - *bytes.Reader -} - -// Close implements io.Closer.Close. -func (BytesReaderCloser) Close() error { - return nil -} diff --git a/vendor/gopkg.in/httprequest.v1/type.go b/vendor/gopkg.in/httprequest.v1/type.go deleted file mode 100644 index 03e10cd3..00000000 --- a/vendor/gopkg.in/httprequest.v1/type.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -// Package httprequest provides functionality for marshaling -// unmarshaling HTTP request parameters into a struct type. -// It also provides a way to define methods as HTTP routes -// using the same approach. -// -// It requires at least Go 1.7, and Go 1.9 is required if the importing -// program also uses golang.org/x/net/context. 
-package httprequest - -import ( - "context" - "fmt" - "net/http" - "reflect" - "sort" - "strings" - "sync" - - "github.com/julienschmidt/httprouter" - "gopkg.in/errgo.v1" -) - -// TODO include field name and source in error messages. - -var ( - typeMutex sync.RWMutex - typeMap = make(map[reflect.Type]*requestType) -) - -// Route is the type of a field that specifies a routing -// path and HTTP method. See Marshal and Unmarshal -// for details. -type Route struct{} - -// Params holds the parameters provided to an HTTP request. -type Params struct { - Response http.ResponseWriter - Request *http.Request - PathVar httprouter.Params - // PathPattern holds the path pattern matched by httprouter. - // It is only set where httprequest has the information; - // that is where the call was made by Server.Handler - // or Server.Handlers. - PathPattern string - // Context holds a context for the request. In Go 1.7 and later, - // this should be used in preference to Request.Context. - Context context.Context -} - -// resultMaker is provided to the unmarshal functions. -// When called with the value passed to the unmarshaler, -// it returns the field value to be assigned to, -// creating it if necessary. -type resultMaker func(reflect.Value) reflect.Value - -// unmarshaler unmarshals some value from params into -// the given value. The value should not be assigned to directly, -// but passed to makeResult and then updated. -type unmarshaler func(v reflect.Value, p Params, makeResult resultMaker) error - -// marshaler marshals the specified value into params. -// The value is always the value type, even if the field type -// is a pointer. -type marshaler func(reflect.Value, *Params) error - -// requestType holds information derived from a request -// type, preprocessed so that it's quick to marshal or unmarshal. 
-type requestType struct { - method string - path string - formBody bool - fields []field -} - -// field holds preprocessed information on an individual field -// in the request. -type field struct { - name string - - // index holds the index slice of the field. - index []int - - // unmarshal is used to unmarshal the value into - // the given field. The value passed as its first - // argument is not a pointer type, but is addressable. - unmarshal unmarshaler - - // marshal is used to marshal the value into the - // given field. The value passed as its first argument is not - // a pointer type, but it is addressable. - marshal marshaler - - // makeResult is the resultMaker that will be - // passed into the unmarshaler. - makeResult resultMaker - - // isPointer is true if the field is pointer to the underlying type. - isPointer bool -} - -// getRequestType is like parseRequestType except that -// it returns the cached requestType when possible, -// adding the type to the cache otherwise. -func getRequestType(t reflect.Type) (*requestType, error) { - typeMutex.RLock() - pt := typeMap[t] - typeMutex.RUnlock() - if pt != nil { - return pt, nil - } - typeMutex.Lock() - defer typeMutex.Unlock() - if pt = typeMap[t]; pt != nil { - // The type has been parsed after we dropped - // the read lock, so use it. - return pt, nil - } - pt, err := parseRequestType(t) - if err != nil { - return nil, errgo.Mask(err) - } - typeMap[t] = pt - return pt, nil -} - -// parseRequestType preprocesses the given type -// into a form that can be efficiently interpreted -// by Unmarshal. -func parseRequestType(t reflect.Type) (*requestType, error) { - if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { - return nil, fmt.Errorf("type is not pointer to struct") - } - - hasBody := false - var pt requestType - foundRoute := false - // taggedFieldIndex holds the index of most recent anonymous - // tagged field - we will skip any fields inside that. 
- // It is nil when we're not inside an anonymous tagged field. - var taggedFieldIndex []int - for _, f := range fields(t.Elem()) { - if f.PkgPath != "" && !f.Anonymous { - // Ignore non-anonymous unexported fields. - continue - } - if taggedFieldIndex != nil && withinIndex(f.Index, taggedFieldIndex) { - // Ignore fields within tagged anonymous fields. - continue - } - taggedFieldIndex = nil - if !foundRoute && f.Anonymous && f.Type == reflect.TypeOf(Route{}) { - var err error - pt.method, pt.path, err = parseRouteTag(f.Tag) - if err != nil { - return nil, errgo.Notef(err, "bad route tag %q", f.Tag) - } - foundRoute = true - continue - } - tag, err := parseTag(f.Tag, f.Name) - if err != nil { - return nil, errgo.Notef(err, "bad tag %q in field %s", f.Tag, f.Name) - } - switch tag.source { - case sourceFormBody: - pt.formBody = true - case sourceBody: - if hasBody { - return nil, errgo.New("more than one body field specified") - } - hasBody = true - } - if hasBody && pt.formBody { - return nil, errgo.New("cannot specify inbody field with a body field") - } - field := field{ - index: f.Index, - name: f.Name, - } - if f.Type.Kind() == reflect.Ptr { - // The field is a pointer, so when the value is set, - // we need to create a new pointer to put - // it into. - field.makeResult = makePointerResult - field.isPointer = true - f.Type = f.Type.Elem() - } else { - field.makeResult = makeValueResult - field.isPointer = false - } - - field.unmarshal, err = getUnmarshaler(tag, f.Type) - if err != nil { - return nil, errgo.Mask(err) - } - - field.marshal, err = getMarshaler(tag, f.Type) - if err != nil { - return nil, errgo.Mask(err) - } - - if f.Anonymous && tag.source != sourceNone { - taggedFieldIndex = f.Index - } - pt.fields = append(pt.fields, field) - } - return &pt, nil -} - -// withinIndex reports whether the field with index i0 should be -// considered to be within the field with index i1. 
-func withinIndex(i0, i1 []int) bool { - // The index of a field within an anonymous field is formed by - // appending its field offset to the anonymous field's index, so - // it is sufficient that we check that i0 is prefixed by i1. - if len(i0) < len(i1) { - return false - } - for i := range i1 { - if i0[i] != i1[i] { - return false - } - } - return true -} - -// Note: we deliberately omit HEAD and OPTIONS -// from this list. HEAD will be routed through GET handlers -// and OPTIONS is handled separately. -var validMethod = map[string]bool{ - "PUT": true, - "POST": true, - "DELETE": true, - "GET": true, - "PATCH": true, -} - -func parseRouteTag(tag reflect.StructTag) (method, path string, err error) { - tagStr := tag.Get("httprequest") - if tagStr == "" { - return "", "", errgo.New("no httprequest tag") - } - f := strings.Fields(tagStr) - switch len(f) { - case 2: - path = f[1] - fallthrough - case 1: - method = f[0] - default: - return "", "", errgo.New("wrong field count") - } - if !validMethod[method] { - return "", "", errgo.Newf("invalid method") - } - // TODO check that path looks valid - return method, path, nil -} - -func makePointerResult(v reflect.Value) reflect.Value { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - return v.Elem() -} - -func makeValueResult(v reflect.Value) reflect.Value { - return v -} - -type tagSource uint8 - -const ( - sourceNone = iota - sourcePath - sourceForm - sourceFormBody - sourceBody - sourceHeader -) - -type tag struct { - name string - source tagSource - omitempty bool -} - -// parseTag parses the given struct tag attached to the given -// field name into a tag structure. 
-func parseTag(rtag reflect.StructTag, fieldName string) (tag, error) { - t := tag{ - name: fieldName, - } - tagStr := rtag.Get("httprequest") - if tagStr == "" { - return t, nil - } - fields := strings.Split(tagStr, ",") - if fields[0] != "" { - t.name = fields[0] - } - inBody := false - for _, f := range fields[1:] { - switch f { - case "path": - t.source = sourcePath - case "form": - t.source = sourceForm - case "inbody": - inBody = true - case "body": - t.source = sourceBody - case "header": - t.source = sourceHeader - case "omitempty": - t.omitempty = true - default: - return tag{}, fmt.Errorf("unknown tag flag %q", f) - } - } - if t.omitempty && t.source != sourceForm && t.source != sourceHeader { - return tag{}, fmt.Errorf("can only use omitempty with form or header fields") - } - if inBody { - if t.source != sourceForm { - return tag{}, fmt.Errorf("can only use inbody with form field") - } - t.source = sourceFormBody - } - return t, nil -} - -// fields returns all the fields in the given struct type -// including fields inside anonymous struct members. -// The fields are ordered with top level fields first -// followed by the members of those fields -// for anonymous fields. -func fields(t reflect.Type) []reflect.StructField { - byName := make(map[string]reflect.StructField) - addFields(t, byName, nil) - fields := make(fieldsByIndex, 0, len(byName)) - for _, f := range byName { - if f.Name != "" { - fields = append(fields, f) - } - } - sort.Sort(fields) - return fields -} - -func addFields(t reflect.Type, byName map[string]reflect.StructField, index []int) { - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - index := append(index, i) - var add bool - old, ok := byName[f.Name] - switch { - case ok && len(old.Index) == len(index): - // Fields with the same name at the same depth - // cancel one another out. Set the field name - // to empty to signify that has happened. 
- old.Name = "" - byName[f.Name] = old - add = false - case ok: - // Fields at less depth win. - add = len(index) < len(old.Index) - default: - // The field did not previously exist. - add = true - } - if add { - // copy the index so that it's not overwritten - // by the other appends. - f.Index = append([]int(nil), index...) - byName[f.Name] = f - } - if f.Anonymous { - if f.Type.Kind() == reflect.Ptr { - f.Type = f.Type.Elem() - } - if f.Type.Kind() == reflect.Struct { - addFields(f.Type, byName, index) - } - } - } -} - -type fieldsByIndex []reflect.StructField - -func (f fieldsByIndex) Len() int { - return len(f) -} - -func (f fieldsByIndex) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -func (f fieldsByIndex) Less(i, j int) bool { - indexi, indexj := f[i].Index, f[j].Index - for len(indexi) != 0 && len(indexj) != 0 { - ii, ij := indexi[0], indexj[0] - if ii != ij { - return ii < ij - } - indexi, indexj = indexi[1:], indexj[1:] - } - return len(indexi) < len(indexj) -} diff --git a/vendor/gopkg.in/httprequest.v1/unmarshal.go b/vendor/gopkg.in/httprequest.v1/unmarshal.go deleted file mode 100644 index 77bf2409..00000000 --- a/vendor/gopkg.in/httprequest.v1/unmarshal.go +++ /dev/null @@ -1,262 +0,0 @@ -package httprequest - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "reflect" - - "gopkg.in/errgo.v1" -) - -var ( - ErrUnmarshal = errgo.New("httprequest unmarshal error") - ErrBadUnmarshalType = errgo.New("httprequest bad unmarshal type") -) - -// Unmarshal takes values from given parameters and fills -// out fields in x, which must be a pointer to a struct. -// -// Tags on the struct's fields determine where each field is filled in -// from. Similar to encoding/json and other encoding packages, the tag -// holds a comma-separated list. The first item in the list is an -// alternative name for the field (the field name itself will be used if -// this is empty). The next item specifies where the field is filled in -// from. 
It may be: -// -// "path" - the field is taken from a parameter in p.PathVar -// with a matching field name. -// -// "form" - the field is taken from the given name in p.Request.Form -// (note that this covers both URL query parameters and -// POST form parameters). -// -// "header" - the field is taken from the given name in -// p.Request.Header. -// -// "body" - the field is filled in by parsing the request body -// as JSON. -// -// For path and form parameters, the field will be filled out from -// the field in p.PathVar or p.Form using one of the following -// methods (in descending order of preference): -// -// - if the type is string, it will be set from the first value. -// -// - if the type is []string, it will be filled out using all values for that field -// (allowed only for form) -// -// - if the type implements encoding.TextUnmarshaler, its -// UnmarshalText method will be used -// -// - otherwise fmt.Sscan will be used to set the value. -// -// When the unmarshaling fails, Unmarshal returns an error with an -// ErrUnmarshal cause. If the type of x is inappropriate, -// it returns an error with an ErrBadUnmarshalType cause. -func Unmarshal(p Params, x interface{}) error { - xv := reflect.ValueOf(x) - pt, err := getRequestType(xv.Type()) - if err != nil { - return errgo.WithCausef(err, ErrBadUnmarshalType, "bad type %s", xv.Type()) - } - if err := unmarshal(p, xv, pt); err != nil { - return errgo.Mask(err, errgo.Is(ErrUnmarshal)) - } - return nil -} - -// unmarshal is the internal version of Unmarshal. 
-func unmarshal(p Params, xv reflect.Value, pt *requestType) error { - xv = xv.Elem() - for _, f := range pt.fields { - fv := xv.FieldByIndex(f.index) - if err := f.unmarshal(fv, p, f.makeResult); err != nil { - return errgo.WithCausef(err, ErrUnmarshal, "cannot unmarshal into field %s", f.name) - } - } - return nil -} - -// getUnmarshaler returns an unmarshaler function -// suitable for unmarshaling a field with the given tag -// into a value of the given type. -func getUnmarshaler(tag tag, t reflect.Type) (unmarshaler, error) { - switch { - case tag.source == sourceNone: - return unmarshalNop, nil - case tag.source == sourceBody: - return unmarshalBody, nil - case t == reflect.TypeOf([]string(nil)): - switch tag.source { - default: - return nil, errgo.New("invalid target type []string for path parameter") - case sourceForm, sourceFormBody: - return unmarshalAllForm(tag.name), nil - case sourceHeader: - return unmarshalAllHeader(tag.name), nil - } - case t == reflect.TypeOf(""): - return unmarshalString(tag), nil - case implementsTextUnmarshaler(t): - return unmarshalWithUnmarshalText(t, tag), nil - default: - return unmarshalWithScan(tag), nil - } -} - -// unmarshalNop just creates the result value but does not -// fill it out with anything. This is used to create pointers -// to new anonymous field members. -func unmarshalNop(v reflect.Value, p Params, makeResult resultMaker) error { - makeResult(v) - return nil -} - -// unmarshalAllForm unmarshals all the form fields for a given -// attribute into a []string slice. -func unmarshalAllForm(name string) unmarshaler { - return func(v reflect.Value, p Params, makeResult resultMaker) error { - vals := p.Request.Form[name] - if len(vals) > 0 { - makeResult(v).Set(reflect.ValueOf(vals)) - } - return nil - } -} - -// unmarshalAllHeader unmarshals all the header fields for a given -// attribute into a []string slice. 
-func unmarshalAllHeader(name string) unmarshaler { - return func(v reflect.Value, p Params, makeResult resultMaker) error { - vals := p.Request.Header[name] - if len(vals) > 0 { - makeResult(v).Set(reflect.ValueOf(vals)) - } - return nil - } -} - -// unmarshalString unmarshals into a string field. -func unmarshalString(tag tag) unmarshaler { - getVal := formGetters[tag.source] - if getVal == nil { - panic("unexpected source") - } - return func(v reflect.Value, p Params, makeResult resultMaker) error { - val, ok := getVal(tag.name, p) - if ok { - makeResult(v).SetString(val) - } - return nil - } -} - -// unmarshalBody unmarshals the http request body -// into the given value. -func unmarshalBody(v reflect.Value, p Params, makeResult resultMaker) error { - if !isJSONMediaType(p.Request.Header) { - fancyErr := newFancyDecodeError(p.Request.Header, p.Request.Body) - - return newDecodeRequestError(p.Request, fancyErr.body, fancyErr) - } - data, err := ioutil.ReadAll(p.Request.Body) - if err != nil { - return errgo.Notef(err, "cannot read request body") - } - // TODO allow body types that aren't necessarily JSON. - result := makeResult(v) - if err := json.Unmarshal(data, result.Addr().Interface()); err != nil { - return errgo.Notef(err, "cannot unmarshal request body") - } - return nil -} - -// formGetters maps from source to a function that -// returns the value for a given key and reports -// whether the value was found. 
-var formGetters = []func(name string, p Params) (string, bool){ - sourceForm: getFromForm, - sourceFormBody: getFromForm, - sourcePath: func(name string, p Params) (string, bool) { - for _, pv := range p.PathVar { - if pv.Key == name { - return pv.Value, true - } - } - return "", false - }, - sourceBody: nil, - sourceHeader: func(name string, p Params) (string, bool) { - vs := p.Request.Header[name] - if len(vs) == 0 { - return "", false - } - return vs[0], true - }, -} - -func getFromForm(name string, p Params) (string, bool) { - vs := p.Request.Form[name] - if len(vs) == 0 { - return "", false - } - return vs[0], true -} - -// encodingTextUnmarshaler is the same as encoding.TextUnmarshaler -// but avoids us importing the encoding package, which some -// broken gccgo installations do not allow. -// TODO remove this and use encoding.TextUnmarshaler instead. -type encodingTextUnmarshaler interface { - UnmarshalText(text []byte) error -} - -var textUnmarshalerType = reflect.TypeOf((*encodingTextUnmarshaler)(nil)).Elem() - -func implementsTextUnmarshaler(t reflect.Type) bool { - // Use the pointer type, because a pointer - // type will implement a superset of the methods - // of a non-pointer type. - return reflect.PtrTo(t).Implements(textUnmarshalerType) -} - -// unmarshalWithUnmarshalText returns an unmarshaler -// that unmarshals the given type from the given tag -// using its UnmarshalText method. -func unmarshalWithUnmarshalText(t reflect.Type, tag tag) unmarshaler { - getVal := formGetters[tag.source] - if getVal == nil { - panic("unexpected source") - } - return func(v reflect.Value, p Params, makeResult resultMaker) error { - val, ok := getVal(tag.name, p) - if !ok { - // TODO allow specifying that a field is mandatory? - return nil - } - uv := makeResult(v).Addr().Interface().(encodingTextUnmarshaler) - return uv.UnmarshalText([]byte(val)) - } -} - -// unmarshalWithScan returns an unmarshaler -// that unmarshals the given tag using fmt.Scan. 
-func unmarshalWithScan(tag tag) unmarshaler { - formGet := formGetters[tag.source] - if formGet == nil { - panic("unexpected source") - } - return func(v reflect.Value, p Params, makeResult resultMaker) error { - val, ok := formGet(tag.name, p) - if !ok { - // TODO allow specifying that a field is mandatory? - return nil - } - _, err := fmt.Sscan(val, makeResult(v).Addr().Interface()) - if err != nil { - return errgo.Notef(err, "cannot parse %q into %s", val, v.Type()) - } - return nil - } -} diff --git a/vendor/gopkg.in/macaroon.v2/.gitignore b/vendor/gopkg.in/macaroon.v2/.gitignore deleted file mode 100644 index 9ed3b07c..00000000 --- a/vendor/gopkg.in/macaroon.v2/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.test diff --git a/vendor/gopkg.in/macaroon.v2/.travis.yml b/vendor/gopkg.in/macaroon.v2/.travis.yml deleted file mode 100644 index f9a991c4..00000000 --- a/vendor/gopkg.in/macaroon.v2/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go -go_import_path: "gopkg.in/macaroon.v2" -go: - - "1.7" - - "1.10" -before_install: - - "go get github.com/rogpeppe/godeps" -install: - - "go get -d gopkg.in/macaroon.v2" - - "godeps -u $GOPATH/src/gopkg.in/macaroon.v2/dependencies.tsv" -script: go test ./... diff --git a/vendor/gopkg.in/macaroon.v2/LICENSE b/vendor/gopkg.in/macaroon.v2/LICENSE deleted file mode 100644 index 9525fc82..00000000 --- a/vendor/gopkg.in/macaroon.v2/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright © 2014, Roger Peppe -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- * Neither the name of this project nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/macaroon.v2/README.md b/vendor/gopkg.in/macaroon.v2/README.md deleted file mode 100644 index de8284b6..00000000 --- a/vendor/gopkg.in/macaroon.v2/README.md +++ /dev/null @@ -1,355 +0,0 @@ -# macaroon --- - import "gopkg.in/macaroon.v2" - -The macaroon package implements macaroons as described in the paper "Macaroons: -Cookies with Contextual Caveats for Decentralized Authorization in the Cloud" -(http://theory.stanford.edu/~ataly/Papers/macaroons.pdf) - -See the macaroon bakery packages at http://godoc.org/gopkg.in/macaroon-bakery.v2 -for higher level services and operations that use macaroons. - -## Usage - -```go -const ( - TraceInvalid = TraceOpKind(iota) - - // TraceMakeKey represents the operation of calculating a - // fixed length root key from the variable length input key. - TraceMakeKey - - // TraceHash represents a keyed hash operation with one - // or two values. If there is only one value, it will be in Data1. 
- TraceHash - - // TraceBind represents the operation of binding a discharge macaroon - // to its primary macaroon. Data1 holds the signature of the primary - // macaroon. - TraceBind - - // TraceFail represents a verification failure. If present, this will always - // be the last operation in a trace. - TraceFail -) -``` - -#### func Base64Decode - -```go -func Base64Decode(data []byte) ([]byte, error) -``` -Base64Decode base64-decodes the given data. It accepts both standard and URL -encodings, both padded and unpadded. - -#### type Caveat - -```go -type Caveat struct { - // Id holds the id of the caveat. For first - // party caveats this holds the condition; - // for third party caveats this holds the encrypted - // third party caveat. - Id []byte - - // VerificationId holds the verification id. If this is - // non-empty, it's a third party caveat. - VerificationId []byte - - // For third-party caveats, Location holds the - // ocation hint. Note that this is not signature checked - // as part of the caveat, so should only - // be used as a hint. - Location string -} -``` - -Caveat holds a first person or third party caveat. - -#### type Macaroon - -```go -type Macaroon struct { -} -``` - -Macaroon holds a macaroon. See Fig. 7 of -http://theory.stanford.edu/~ataly/Papers/macaroons.pdf for a description of the -data contained within. Macaroons are mutable objects - use Clone as appropriate -to avoid unwanted mutation. - -#### func New - -```go -func New(rootKey, id []byte, loc string, version Version) (*Macaroon, error) -``` -New returns a new macaroon with the given root key, identifier, location and -version. - -#### func (*Macaroon) AddFirstPartyCaveat - -```go -func (m *Macaroon) AddFirstPartyCaveat(condition []byte) error -``` -AddFirstPartyCaveat adds a caveat that will be verified by the target service. 
- -#### func (*Macaroon) AddThirdPartyCaveat - -```go -func (m *Macaroon) AddThirdPartyCaveat(rootKey, caveatId []byte, loc string) error -``` -AddThirdPartyCaveat adds a third-party caveat to the macaroon, using the given -shared root key, caveat id and location hint. The caveat id should encode the -root key in some way, either by encrypting it with a key known to the third -party or by holding a reference to it stored in the third party's storage. - -#### func (*Macaroon) Bind - -```go -func (m *Macaroon) Bind(sig []byte) -``` -Bind prepares the macaroon for being used to discharge the macaroon with the -given signature sig. This must be used before it is used in the discharges -argument to Verify. - -#### func (*Macaroon) Caveats - -```go -func (m *Macaroon) Caveats() []Caveat -``` -Caveats returns the macaroon's caveats. This method will probably change, and -it's important not to change the returned caveat. - -#### func (*Macaroon) Clone - -```go -func (m *Macaroon) Clone() *Macaroon -``` -Clone returns a copy of the receiving macaroon. - -#### func (*Macaroon) Id - -```go -func (m *Macaroon) Id() []byte -``` -Id returns the id of the macaroon. This can hold arbitrary information. - -#### func (*Macaroon) Location - -```go -func (m *Macaroon) Location() string -``` -Location returns the macaroon's location hint. This is not verified as part of -the macaroon. - -#### func (*Macaroon) MarshalBinary - -```go -func (m *Macaroon) MarshalBinary() ([]byte, error) -``` -MarshalBinary implements encoding.BinaryMarshaler by formatting the macaroon -according to the version specified by MarshalAs. - -#### func (*Macaroon) MarshalJSON - -```go -func (m *Macaroon) MarshalJSON() ([]byte, error) -``` -MarshalJSON implements json.Marshaler by marshaling the macaroon in JSON format. -The serialisation format is determined by the macaroon's version. 
- -#### func (*Macaroon) SetLocation - -```go -func (m *Macaroon) SetLocation(loc string) -``` -SetLocation sets the location associated with the macaroon. Note that the -location is not included in the macaroon's hash chain, so this does not change -the signature. - -#### func (*Macaroon) Signature - -```go -func (m *Macaroon) Signature() []byte -``` -Signature returns the macaroon's signature. - -#### func (*Macaroon) TraceVerify - -```go -func (m *Macaroon) TraceVerify(rootKey []byte, discharges []*Macaroon) ([]Trace, error) -``` -TraceVerify verifies the signature of the macaroon without checking any of the -first party caveats, and returns a slice of Traces holding the operations used -when verifying the macaroons. - -Each element in the returned slice corresponds to the operation for one of the -argument macaroons, with m at index 0, and discharges at 1 onwards. - -#### func (*Macaroon) UnmarshalBinary - -```go -func (m *Macaroon) UnmarshalBinary(data []byte) error -``` -UnmarshalBinary implements encoding.BinaryUnmarshaler. It accepts both V1 and V2 -binary encodings. - -#### func (*Macaroon) UnmarshalJSON - -```go -func (m *Macaroon) UnmarshalJSON(data []byte) error -``` -UnmarshalJSON implements json.Unmarshaller by unmarshaling the given macaroon in -JSON format. It accepts both V1 and V2 forms encoded forms, and also a -base64-encoded JSON string containing the binary-marshaled macaroon. - -After unmarshaling, the macaroon's version will reflect the version that it was -unmarshaled as. - -#### func (*Macaroon) Verify - -```go -func (m *Macaroon) Verify(rootKey []byte, check func(caveat string) error, discharges []*Macaroon) error -``` -Verify verifies that the receiving macaroon is valid. The root key must be the -same that the macaroon was originally minted with. The check function is called -to verify each first-party caveat - it should return an error if the condition -is not met. - -The discharge macaroons should be provided in discharges. 
- -Verify returns nil if the verification succeeds. - -#### func (*Macaroon) VerifySignature - -```go -func (m *Macaroon) VerifySignature(rootKey []byte, discharges []*Macaroon) ([]string, error) -``` -VerifySignature verifies the signature of the given macaroon with respect to the -root key, but it does not validate any first-party caveats. Instead it returns -all the applicable first party caveats on success. - -The caller is responsible for checking the returned first party caveat -conditions. - -#### func (*Macaroon) Version - -```go -func (m *Macaroon) Version() Version -``` -Version returns the version of the macaroon. - -#### type Slice - -```go -type Slice []*Macaroon -``` - -Slice defines a collection of macaroons. By convention, the first macaroon in -the slice is a primary macaroon and the rest are discharges for its third party -caveats. - -#### func (Slice) MarshalBinary - -```go -func (s Slice) MarshalBinary() ([]byte, error) -``` -MarshalBinary implements encoding.BinaryMarshaler. - -#### func (*Slice) UnmarshalBinary - -```go -func (s *Slice) UnmarshalBinary(data []byte) error -``` -UnmarshalBinary implements encoding.BinaryUnmarshaler. It accepts all known -binary encodings for the data - all the embedded macaroons need not be encoded -in the same format. - -#### type Trace - -```go -type Trace struct { - RootKey []byte - Ops []TraceOp -} -``` - -Trace holds all toperations involved in verifying a macaroon, and the root key -used as the initial verification key. This can be useful for debugging macaroon -implementations. - -#### func (Trace) Results - -```go -func (t Trace) Results() [][]byte -``` -Results returns the output from all operations in the Trace. The result from -ts.Ops[i] will be in the i'th element of the returned slice. When a trace has -resulted in a failure, the last element will be nil. 
- -#### type TraceOp - -```go -type TraceOp struct { - Kind TraceOpKind `json:"kind"` - Data1 []byte `json:"data1,omitempty"` - Data2 []byte `json:"data2,omitempty"` -} -``` - -TraceOp holds one possible operation when verifying a macaroon. - -#### func (TraceOp) Result - -```go -func (op TraceOp) Result(input []byte) []byte -``` -Result returns the result of computing the given operation with the given input -data. If op is TraceFail, it returns nil. - -#### type TraceOpKind - -```go -type TraceOpKind int -``` - -TraceOpKind represents the kind of a macaroon verification operation. - -#### func (TraceOpKind) String - -```go -func (k TraceOpKind) String() string -``` -String returns a string representation of the operation. - -#### type Version - -```go -type Version uint16 -``` - -Version specifies the version of a macaroon. In version 1, the macaroon id and -all caveats must be UTF-8-compatible strings, and the size of any part of the -macaroon may not exceed approximately 64K. In version 2, all field may be -arbitrary binary blobs. - -```go -const ( - // V1 specifies version 1 macaroons. - V1 Version = 1 - - // V2 specifies version 2 macaroons. - V2 Version = 2 - - // LatestVersion holds the latest supported version. - LatestVersion = V2 -) -``` - -#### func (Version) String - -```go -func (v Version) String() string -``` -String returns a string representation of the version; for example V1 formats as -"v1". diff --git a/vendor/gopkg.in/macaroon.v2/TODO b/vendor/gopkg.in/macaroon.v2/TODO deleted file mode 100644 index f272ce3e..00000000 --- a/vendor/gopkg.in/macaroon.v2/TODO +++ /dev/null @@ -1,4 +0,0 @@ -macaroon: - - - verify that all signature calculations to correspond exactly - with libmacaroons. 
diff --git a/vendor/gopkg.in/macaroon.v2/crypto.go b/vendor/gopkg.in/macaroon.v2/crypto.go deleted file mode 100644 index c7e98e51..00000000 --- a/vendor/gopkg.in/macaroon.v2/crypto.go +++ /dev/null @@ -1,91 +0,0 @@ -package macaroon - -import ( - "crypto/hmac" - "crypto/sha256" - "fmt" - "hash" - "io" - - "golang.org/x/crypto/nacl/secretbox" -) - -func keyedHash(key *[hashLen]byte, text []byte) *[hashLen]byte { - h := keyedHasher(key) - h.Write([]byte(text)) - var sum [hashLen]byte - hashSum(h, &sum) - return &sum -} - -func keyedHasher(key *[hashLen]byte) hash.Hash { - return hmac.New(sha256.New, key[:]) -} - -var keyGen = []byte("macaroons-key-generator") - -// makeKey derives a fixed length key from a variable -// length key. The keyGen constant is the same -// as that used in libmacaroons. -func makeKey(variableKey []byte) *[keyLen]byte { - h := hmac.New(sha256.New, keyGen) - h.Write(variableKey) - var key [keyLen]byte - hashSum(h, &key) - return &key -} - -// hashSum calls h.Sum to put the sum into -// the given destination. It also sanity -// checks that the result really is the expected -// size. -func hashSum(h hash.Hash, dest *[hashLen]byte) { - r := h.Sum(dest[:0]) - if len(r) != len(dest) { - panic("hash size inconsistency") - } -} - -const ( - keyLen = 32 - nonceLen = 24 - hashLen = sha256.Size -) - -func newNonce(r io.Reader) (*[nonceLen]byte, error) { - var nonce [nonceLen]byte - _, err := r.Read(nonce[:]) - if err != nil { - return nil, fmt.Errorf("cannot generate random bytes: %v", err) - } - return &nonce, nil -} - -func encrypt(key *[keyLen]byte, text *[hashLen]byte, r io.Reader) ([]byte, error) { - nonce, err := newNonce(r) - if err != nil { - return nil, err - } - out := make([]byte, 0, len(nonce)+secretbox.Overhead+len(text)) - out = append(out, nonce[:]...) 
- return secretbox.Seal(out, text[:], nonce, key), nil -} - -func decrypt(key *[keyLen]byte, ciphertext []byte) (*[hashLen]byte, error) { - if len(ciphertext) < nonceLen+secretbox.Overhead { - return nil, fmt.Errorf("message too short") - } - var nonce [nonceLen]byte - copy(nonce[:], ciphertext) - ciphertext = ciphertext[nonceLen:] - text, ok := secretbox.Open(nil, ciphertext, &nonce, key) - if !ok { - return nil, fmt.Errorf("decryption failure") - } - if len(text) != hashLen { - return nil, fmt.Errorf("decrypted text is wrong length") - } - var rtext [hashLen]byte - copy(rtext[:], text) - return &rtext, nil -} diff --git a/vendor/gopkg.in/macaroon.v2/dependencies.tsv b/vendor/gopkg.in/macaroon.v2/dependencies.tsv deleted file mode 100644 index ba3c4e0e..00000000 --- a/vendor/gopkg.in/macaroon.v2/dependencies.tsv +++ /dev/null @@ -1,5 +0,0 @@ -github.com/frankban/quicktest git 2c6a0d60c05cd2d970f356eee0623ddf1cd0d62d 2018-02-06T12:35:47Z -github.com/google/go-cmp git 5411ab924f9ffa6566244a9e504bc347edacffd3 2018-03-28T20:15:12Z -github.com/kr/pretty git cfb55aafdaf3ec08f0db22699ab822c50091b1c4 2016-08-23T17:07:15Z -github.com/kr/text git 7cafcd837844e784b526369c9bce262804aebc60 2016-05-04T23:40:17Z -golang.org/x/crypto git 96846453c37f0876340a66a47f3f75b1f3a6cd2d 2017-04-21T04:31:20Z diff --git a/vendor/gopkg.in/macaroon.v2/macaroon.go b/vendor/gopkg.in/macaroon.v2/macaroon.go deleted file mode 100644 index d730f892..00000000 --- a/vendor/gopkg.in/macaroon.v2/macaroon.go +++ /dev/null @@ -1,399 +0,0 @@ -// The macaroon package implements macaroons as described in -// the paper "Macaroons: Cookies with Contextual Caveats for -// Decentralized Authorization in the Cloud" -// (http://theory.stanford.edu/~ataly/Papers/macaroons.pdf) -// -// See the macaroon bakery packages at http://godoc.org/gopkg.in/macaroon-bakery.v2 -// for higher level services and operations that use macaroons. 
-package macaroon - -import ( - "bytes" - "crypto/hmac" - "crypto/rand" - "fmt" - "io" - "unicode/utf8" -) - -// Macaroon holds a macaroon. -// See Fig. 7 of http://theory.stanford.edu/~ataly/Papers/macaroons.pdf -// for a description of the data contained within. -// Macaroons are mutable objects - use Clone as appropriate -// to avoid unwanted mutation. -type Macaroon struct { - location string - id []byte - caveats []Caveat - sig [hashLen]byte - version Version -} - -// Equal reports whether m has exactly the same content as m1. -func (m *Macaroon) Equal(m1 *Macaroon) bool { - if m == m1 || m == nil || m1 == nil { - return m == m1 - } - if m.location != m1.location || - !bytes.Equal(m.id, m1.id) || - m.sig != m1.sig || - m.version != m1.version || - len(m.caveats) != len(m1.caveats) { - return false - } - for i, c := range m.caveats { - if !c.Equal(m1.caveats[i]) { - return false - } - } - return true -} - -// Caveat holds a first party or third party caveat. -type Caveat struct { - // Id holds the id of the caveat. For first - // party caveats this holds the condition; - // for third party caveats this holds the encrypted - // third party caveat. - Id []byte - - // VerificationId holds the verification id. If this is - // non-empty, it's a third party caveat. - VerificationId []byte - - // For third-party caveats, Location holds the - // ocation hint. Note that this is not signature checked - // as part of the caveat, so should only - // be used as a hint. - Location string -} - -// Equal reports whether c is equal to c1. -func (c Caveat) Equal(c1 Caveat) bool { - return bytes.Equal(c.Id, c1.Id) && - bytes.Equal(c.VerificationId, c1.VerificationId) && - c.Location == c1.Location -} - -// isThirdParty reports whether the caveat must be satisfied -// by some third party (if not, it's a first person caveat). 
-func (cav *Caveat) isThirdParty() bool { - return len(cav.VerificationId) > 0 -} - -// New returns a new macaroon with the given root key, -// identifier, location and version. -func New(rootKey, id []byte, loc string, version Version) (*Macaroon, error) { - var m Macaroon - if version < V2 { - if !utf8.Valid(id) { - return nil, fmt.Errorf("invalid id for %v macaroon", id) - } - // TODO check id length too. - } - if version < V1 || version > LatestVersion { - return nil, fmt.Errorf("invalid version %v", version) - } - m.version = version - m.init(append([]byte(nil), id...), loc, version) - derivedKey := makeKey(rootKey) - m.sig = *keyedHash(derivedKey, m.id) - return &m, nil -} - -// init initializes the macaroon. It retains a reference to id. -func (m *Macaroon) init(id []byte, loc string, vers Version) { - m.location = loc - m.id = append([]byte(nil), id...) - m.version = vers -} - -// SetLocation sets the location associated with the macaroon. -// Note that the location is not included in the macaroon's -// hash chain, so this does not change the signature. -func (m *Macaroon) SetLocation(loc string) { - m.location = loc -} - -// Clone returns a copy of the receiving macaroon. -func (m *Macaroon) Clone() *Macaroon { - m1 := *m - // Ensure that if any caveats are appended to the new - // macaroon, it will copy the caveats. - m1.caveats = m1.caveats[0:len(m1.caveats):len(m1.caveats)] - return &m1 -} - -// Location returns the macaroon's location hint. This is -// not verified as part of the macaroon. -func (m *Macaroon) Location() string { - return m.location -} - -// Id returns the id of the macaroon. This can hold -// arbitrary information. -func (m *Macaroon) Id() []byte { - return append([]byte(nil), m.id...) -} - -// Signature returns the macaroon's signature. 
-func (m *Macaroon) Signature() []byte { - // sig := m.sig - // return sig[:] - // Work around https://github.com/golang/go/issues/9537 - sig := new([hashLen]byte) - *sig = m.sig - return sig[:] -} - -// Caveats returns the macaroon's caveats. -// This method will probably change, and it's important not to change the returned caveat. -func (m *Macaroon) Caveats() []Caveat { - return m.caveats[0:len(m.caveats):len(m.caveats)] -} - -// appendCaveat appends a caveat without modifying the macaroon's signature. -func (m *Macaroon) appendCaveat(caveatId, verificationId []byte, loc string) { - if len(verificationId) == 0 { - // Ensure that an empty vid is always represented by nil, - // so that marshalers don't procuce spurious zero-length - // vid fields which can confuse some verifiers. - verificationId = nil - } - m.caveats = append(m.caveats, Caveat{ - Id: caveatId, - VerificationId: verificationId, - Location: loc, - }) -} - -func (m *Macaroon) addCaveat(caveatId, verificationId []byte, loc string) error { - if m.version < V2 { - if !utf8.Valid(caveatId) { - return fmt.Errorf("invalid caveat id for %v macaroon", m.version) - } - // TODO check caveat length too. - } - m.appendCaveat(caveatId, verificationId, loc) - if len(verificationId) == 0 { - m.sig = *keyedHash(&m.sig, caveatId) - } else { - m.sig = *keyedHash2(&m.sig, verificationId, caveatId) - } - return nil -} - -func keyedHash2(key *[keyLen]byte, d1, d2 []byte) *[hashLen]byte { - var data [hashLen * 2]byte - copy(data[0:], keyedHash(key, d1)[:]) - copy(data[hashLen:], keyedHash(key, d2)[:]) - return keyedHash(key, data[:]) -} - -// Bind prepares the macaroon for being used to discharge the -// macaroon with the given signature sig. This must be -// used before it is used in the discharges argument to Verify. -func (m *Macaroon) Bind(sig []byte) { - m.sig = *bindForRequest(sig, &m.sig) -} - -// AddFirstPartyCaveat adds a caveat that will be verified -// by the target service. 
-func (m *Macaroon) AddFirstPartyCaveat(condition []byte) error { - m.addCaveat(condition, nil, "") - return nil -} - -// AddThirdPartyCaveat adds a third-party caveat to the macaroon, -// using the given shared root key, caveat id and location hint. -// The caveat id should encode the root key in some -// way, either by encrypting it with a key known to the third party -// or by holding a reference to it stored in the third party's -// storage. -func (m *Macaroon) AddThirdPartyCaveat(rootKey, caveatId []byte, loc string) error { - return m.addThirdPartyCaveatWithRand(rootKey, caveatId, loc, rand.Reader) -} - -// addThirdPartyCaveatWithRand adds a third-party caveat to the macaroon, using -// the given source of randomness for encrypting the caveat id. -func (m *Macaroon) addThirdPartyCaveatWithRand(rootKey, caveatId []byte, loc string, r io.Reader) error { - derivedKey := makeKey(rootKey) - verificationId, err := encrypt(&m.sig, derivedKey, r) - if err != nil { - return err - } - m.addCaveat(caveatId, verificationId, loc) - return nil -} - -var zeroKey [hashLen]byte - -// bindForRequest binds the given macaroon -// to the given signature of its parent macaroon. -func bindForRequest(rootSig []byte, dischargeSig *[hashLen]byte) *[hashLen]byte { - if bytes.Equal(rootSig, dischargeSig[:]) { - return dischargeSig - } - return keyedHash2(&zeroKey, rootSig, dischargeSig[:]) -} - -// Verify verifies that the receiving macaroon is valid. -// The root key must be the same that the macaroon was originally -// minted with. The check function is called to verify each -// first-party caveat - it should return an error if the -// condition is not met. -// -// The discharge macaroons should be provided in discharges. -// -// Verify returns nil if the verification succeeds. 
-func (m *Macaroon) Verify(rootKey []byte, check func(caveat string) error, discharges []*Macaroon) error { - var vctx verificationContext - vctx.init(rootKey, m, discharges, check) - return vctx.verify(m, rootKey) -} - -// VerifySignature verifies the signature of the given macaroon with respect -// to the root key, but it does not validate any first-party caveats. Instead -// it returns all the applicable first party caveats on success. -// -// The caller is responsible for checking the returned first party caveat -// conditions. -func (m *Macaroon) VerifySignature(rootKey []byte, discharges []*Macaroon) ([]string, error) { - n := len(m.caveats) - for _, dm := range discharges { - n += len(dm.caveats) - } - conds := make([]string, 0, n) - var vctx verificationContext - vctx.init(rootKey, m, discharges, func(cond string) error { - conds = append(conds, cond) - return nil - }) - err := vctx.verify(m, rootKey) - if err != nil { - return nil, err - } - return conds, nil -} - -// TraceVerify verifies the signature of the macaroon without checking -// any of the first party caveats, and returns a slice of Traces holding -// the operations used when verifying the macaroons. -// -// Each element in the returned slice corresponds to the -// operation for one of the argument macaroons, with m at index 0, -// and discharges at 1 onwards. 
-func (m *Macaroon) TraceVerify(rootKey []byte, discharges []*Macaroon) ([]Trace, error) { - var vctx verificationContext - vctx.init(rootKey, m, discharges, func(string) error { return nil }) - vctx.traces = make([]Trace, len(discharges)+1) - err := vctx.verify(m, rootKey) - return vctx.traces, err -} - -type verificationContext struct { - used []bool - discharges []*Macaroon - rootSig *[hashLen]byte - traces []Trace - check func(caveat string) error -} - -func (vctx *verificationContext) init(rootKey []byte, root *Macaroon, discharges []*Macaroon, check func(caveat string) error) { - *vctx = verificationContext{ - discharges: discharges, - used: make([]bool, len(discharges)), - rootSig: &root.sig, - check: check, - } -} - -func (vctx *verificationContext) verify(root *Macaroon, rootKey []byte) error { - vctx.traceRootKey(0, rootKey) - vctx.trace(0, TraceMakeKey, rootKey, nil) - derivedKey := makeKey(rootKey) - if err := vctx.verify0(root, 0, derivedKey); err != nil { - vctx.trace(0, TraceFail, nil, nil) - return err - } - for i, wasUsed := range vctx.used { - if !wasUsed { - vctx.trace(i+1, TraceFail, nil, nil) - return fmt.Errorf("discharge macaroon %q was not used", vctx.discharges[i].Id()) - } - } - return nil -} - -func (vctx *verificationContext) verify0(m *Macaroon, index int, rootKey *[hashLen]byte) error { - vctx.trace(index, TraceHash, m.id, nil) - caveatSig := keyedHash(rootKey, m.id) - for i, cav := range m.caveats { - if cav.isThirdParty() { - cavKey, err := decrypt(caveatSig, cav.VerificationId) - if err != nil { - return fmt.Errorf("failed to decrypt caveat %d signature: %v", i, err) - } - dm, di, err := vctx.findDischarge(cav.Id) - if err != nil { - return err - } - vctx.traceRootKey(di+1, cavKey[:]) - if err := vctx.verify0(dm, di+1, cavKey); err != nil { - vctx.trace(di+1, TraceFail, nil, nil) - return err - } - vctx.trace(index, TraceHash, cav.VerificationId, cav.Id) - caveatSig = keyedHash2(caveatSig, cav.VerificationId, cav.Id) - } else { - 
vctx.trace(index, TraceHash, cav.Id, nil) - caveatSig = keyedHash(caveatSig, cav.Id) - if err := vctx.check(string(cav.Id)); err != nil { - return err - } - } - } - if index > 0 { - vctx.trace(index, TraceBind, vctx.rootSig[:], caveatSig[:]) - caveatSig = bindForRequest(vctx.rootSig[:], caveatSig) - } - // TODO perhaps we should actually do this check before doing - // all the potentially expensive caveat checks. - if !hmac.Equal(caveatSig[:], m.sig[:]) { - return fmt.Errorf("signature mismatch after caveat verification") - } - return nil -} - -func (vctx *verificationContext) findDischarge(id []byte) (dm *Macaroon, index int, err error) { - for di, dm := range vctx.discharges { - if !bytes.Equal(dm.id, id) { - continue - } - // Don't use a discharge macaroon more than once. - // It's important that we do this check here rather than after - // verify as it prevents potentially infinite recursion. - if vctx.used[di] { - return nil, 0, fmt.Errorf("discharge macaroon %q was used more than once", dm.Id()) - } - vctx.used[di] = true - return dm, di, nil - } - return nil, 0, fmt.Errorf("cannot find discharge macaroon for caveat %x", id) -} - -func (vctx *verificationContext) trace(index int, op TraceOpKind, data1, data2 []byte) { - if vctx.traces != nil { - vctx.traces[index].Ops = append(vctx.traces[index].Ops, TraceOp{ - Kind: op, - Data1: data1, - Data2: data2, - }) - } -} - -func (vctx *verificationContext) traceRootKey(index int, rootKey []byte) { - if vctx.traces != nil { - vctx.traces[index].RootKey = rootKey[:] - } -} diff --git a/vendor/gopkg.in/macaroon.v2/marshal-v1.go b/vendor/gopkg.in/macaroon.v2/marshal-v1.go deleted file mode 100644 index da20db37..00000000 --- a/vendor/gopkg.in/macaroon.v2/marshal-v1.go +++ /dev/null @@ -1,190 +0,0 @@ -package macaroon - -import ( - "encoding/base64" - "encoding/hex" - "encoding/json" - "fmt" - "unicode/utf8" -) - -// macaroonJSONV1 defines the V1 JSON format for macaroons. 
-type macaroonJSONV1 struct { - Caveats []caveatJSONV1 `json:"caveats"` - Location string `json:"location"` - Identifier string `json:"identifier"` - Signature string `json:"signature"` // hex-encoded -} - -// caveatJSONV1 defines the V1 JSON format for caveats within a macaroon. -type caveatJSONV1 struct { - CID string `json:"cid"` - VID string `json:"vid,omitempty"` - Location string `json:"cl,omitempty"` -} - -// marshalJSONV1 marshals the macaroon to the V1 JSON format. -func (m *Macaroon) marshalJSONV1() ([]byte, error) { - if !utf8.Valid(m.id) { - return nil, fmt.Errorf("macaroon id is not valid UTF-8") - } - mjson := macaroonJSONV1{ - Location: m.location, - Identifier: string(m.id), - Signature: hex.EncodeToString(m.sig[:]), - Caveats: make([]caveatJSONV1, len(m.caveats)), - } - for i, cav := range m.caveats { - if !utf8.Valid(cav.Id) { - return nil, fmt.Errorf("caveat id is not valid UTF-8") - } - mjson.Caveats[i] = caveatJSONV1{ - Location: cav.Location, - CID: string(cav.Id), - VID: base64.RawURLEncoding.EncodeToString(cav.VerificationId), - } - } - data, err := json.Marshal(mjson) - if err != nil { - return nil, fmt.Errorf("cannot marshal json data: %v", err) - } - return data, nil -} - -// initJSONV1 initializes m from the JSON-unmarshaled data -// held in mjson. 
-func (m *Macaroon) initJSONV1(mjson *macaroonJSONV1) error { - m.init([]byte(mjson.Identifier), mjson.Location, V1) - sig, err := hex.DecodeString(mjson.Signature) - if err != nil { - return fmt.Errorf("cannot decode macaroon signature %q: %v", m.sig, err) - } - if len(sig) != hashLen { - return fmt.Errorf("signature has unexpected length %d", len(sig)) - } - copy(m.sig[:], sig) - m.caveats = m.caveats[:0] - for _, cav := range mjson.Caveats { - vid, err := Base64Decode([]byte(cav.VID)) - if err != nil { - return fmt.Errorf("cannot decode verification id %q: %v", cav.VID, err) - } - m.appendCaveat([]byte(cav.CID), vid, cav.Location) - } - return nil -} - -// The original (v1) binary format of a macaroon is as follows. -// Each identifier represents a v1 packet. -// -// location -// identifier -// ( -// caveatId? -// verificationId? -// caveatLocation? -// )* -// signature - -// parseBinaryV1 parses the given data in V1 format into the macaroon. The macaroon's -// internal data structures will retain references to the data. It -// returns the data after the end of the macaroon. -func (m *Macaroon) parseBinaryV1(data []byte) ([]byte, error) { - var err error - - loc, err := expectPacketV1(data, fieldNameLocation) - if err != nil { - return nil, err - } - data = data[loc.totalLen:] - id, err := expectPacketV1(data, fieldNameIdentifier) - if err != nil { - return nil, err - } - data = data[id.totalLen:] - m.init(id.data, string(loc.data), V1) - var cav Caveat - for { - p, err := parsePacketV1(data) - if err != nil { - return nil, err - } - data = data[p.totalLen:] - switch field := string(p.fieldName); field { - case fieldNameSignature: - // At the end of the caveats we find the signature. 
- if cav.Id != nil { - m.caveats = append(m.caveats, cav) - } - if len(p.data) != hashLen { - return nil, fmt.Errorf("signature has unexpected length %d", len(p.data)) - } - copy(m.sig[:], p.data) - return data, nil - case fieldNameCaveatId: - if cav.Id != nil { - m.caveats = append(m.caveats, cav) - cav = Caveat{} - } - cav.Id = p.data - case fieldNameVerificationId: - if cav.VerificationId != nil { - return nil, fmt.Errorf("repeated field %q in caveat", fieldNameVerificationId) - } - cav.VerificationId = p.data - case fieldNameCaveatLocation: - if cav.Location != "" { - return nil, fmt.Errorf("repeated field %q in caveat", fieldNameLocation) - } - cav.Location = string(p.data) - default: - return nil, fmt.Errorf("unexpected field %q", field) - } - } -} - -func expectPacketV1(data []byte, kind string) (packetV1, error) { - p, err := parsePacketV1(data) - if err != nil { - return packetV1{}, err - } - if field := string(p.fieldName); field != kind { - return packetV1{}, fmt.Errorf("unexpected field %q; expected %s", field, kind) - } - return p, nil -} - -// appendBinaryV1 appends the binary encoding of m to data. 
-func (m *Macaroon) appendBinaryV1(data []byte) ([]byte, error) { - var ok bool - data, ok = appendPacketV1(data, fieldNameLocation, []byte(m.location)) - if !ok { - return nil, fmt.Errorf("failed to append location to macaroon, packet is too long") - } - data, ok = appendPacketV1(data, fieldNameIdentifier, m.id) - if !ok { - return nil, fmt.Errorf("failed to append identifier to macaroon, packet is too long") - } - for _, cav := range m.caveats { - data, ok = appendPacketV1(data, fieldNameCaveatId, cav.Id) - if !ok { - return nil, fmt.Errorf("failed to append caveat id to macaroon, packet is too long") - } - if cav.VerificationId == nil { - continue - } - data, ok = appendPacketV1(data, fieldNameVerificationId, cav.VerificationId) - if !ok { - return nil, fmt.Errorf("failed to append verification id to macaroon, packet is too long") - } - data, ok = appendPacketV1(data, fieldNameCaveatLocation, []byte(cav.Location)) - if !ok { - return nil, fmt.Errorf("failed to append verification id to macaroon, packet is too long") - } - } - data, ok = appendPacketV1(data, fieldNameSignature, m.sig[:]) - if !ok { - return nil, fmt.Errorf("failed to append signature to macaroon, packet is too long") - } - return data, nil -} diff --git a/vendor/gopkg.in/macaroon.v2/marshal-v2.go b/vendor/gopkg.in/macaroon.v2/marshal-v2.go deleted file mode 100644 index e02131f2..00000000 --- a/vendor/gopkg.in/macaroon.v2/marshal-v2.go +++ /dev/null @@ -1,253 +0,0 @@ -package macaroon - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "unicode/utf8" -) - -// macaroonJSONV2 defines the V2 JSON format for macaroons. -type macaroonJSONV2 struct { - Caveats []caveatJSONV2 `json:"c,omitempty"` - Location string `json:"l,omitempty"` - Identifier string `json:"i,omitempty"` - Identifier64 string `json:"i64,omitempty"` - Signature string `json:"s,omitempty"` - Signature64 string `json:"s64,omitempty"` -} - -// caveatJSONV2 defines the V2 JSON format for caveats within a macaroon. 
-type caveatJSONV2 struct { - CID string `json:"i,omitempty"` - CID64 string `json:"i64,omitempty"` - VID string `json:"v,omitempty"` - VID64 string `json:"v64,omitempty"` - Location string `json:"l,omitempty"` -} - -func (m *Macaroon) marshalJSONV2() ([]byte, error) { - mjson := macaroonJSONV2{ - Location: m.location, - Caveats: make([]caveatJSONV2, len(m.caveats)), - } - putJSONBinaryField(m.id, &mjson.Identifier, &mjson.Identifier64) - putJSONBinaryField(m.sig[:], &mjson.Signature, &mjson.Signature64) - for i, cav := range m.caveats { - cavjson := caveatJSONV2{ - Location: cav.Location, - } - putJSONBinaryField(cav.Id, &cavjson.CID, &cavjson.CID64) - putJSONBinaryField(cav.VerificationId, &cavjson.VID, &cavjson.VID64) - mjson.Caveats[i] = cavjson - } - data, err := json.Marshal(mjson) - if err != nil { - return nil, fmt.Errorf("cannot marshal json data: %v", err) - } - return data, nil -} - -// initJSONV2 initializes m from the JSON-unmarshaled data -// held in mjson. -func (m *Macaroon) initJSONV2(mjson *macaroonJSONV2) error { - id, err := jsonBinaryField(mjson.Identifier, mjson.Identifier64) - if err != nil { - return fmt.Errorf("invalid identifier: %v", err) - } - m.init(id, mjson.Location, V2) - sig, err := jsonBinaryField(mjson.Signature, mjson.Signature64) - if err != nil { - return fmt.Errorf("invalid signature: %v", err) - } - if len(sig) != hashLen { - return fmt.Errorf("signature has unexpected length %d", len(sig)) - } - copy(m.sig[:], sig) - m.caveats = make([]Caveat, 0, len(mjson.Caveats)) - for _, cav := range mjson.Caveats { - cid, err := jsonBinaryField(cav.CID, cav.CID64) - if err != nil { - return fmt.Errorf("invalid cid in caveat: %v", err) - } - vid, err := jsonBinaryField(cav.VID, cav.VID64) - if err != nil { - return fmt.Errorf("invalid vid in caveat: %v", err) - } - m.appendCaveat(cid, vid, cav.Location) - } - return nil -} - -// putJSONBinaryField puts the value of x into one -// of the appropriate fields depending on its value. 
-func putJSONBinaryField(x []byte, s, sb64 *string) { - if !utf8.Valid(x) { - *sb64 = base64.RawURLEncoding.EncodeToString(x) - return - } - // We could use either string or base64 encoding; - // choose the most compact of the two possibilities. - b64len := base64.RawURLEncoding.EncodedLen(len(x)) - sx := string(x) - if jsonEnc, _ := json.Marshal(sx); len(jsonEnc)-2 <= b64len+2 { - // The JSON encoding is smaller than the base 64 encoding. - // NB marshaling a string can never return an error; - // it always includes the two quote characters; - // but using base64 also uses two extra characters for the - // "64" suffix on the field name. If all is equal, prefer string - // encoding because it's more readable. - *s = sx - return - } - *sb64 = base64.RawURLEncoding.EncodeToString(x) -} - -// jsonBinaryField returns the value of a JSON field that may -// be string, hex or base64-encoded. -func jsonBinaryField(s, sb64 string) ([]byte, error) { - switch { - case s != "": - if sb64 != "" { - return nil, fmt.Errorf("ambiguous field encoding") - } - return []byte(s), nil - case sb64 != "": - return Base64Decode([]byte(sb64)) - } - return []byte{}, nil -} - -// The v2 binary format of a macaroon is as follows. -// All entries other than the version are packets as -// parsed by parsePacketV2. -// -// version [1 byte] -// location? -// identifier -// eos -// ( -// location? -// identifier -// verificationId? -// eos -// )* -// eos -// signature -// -// See also https://github.com/rescrv/libmacaroons/blob/master/doc/format.txt - -// parseBinaryV2 parses the given data in V2 format into the macaroon. The macaroon's -// internal data structures will retain references to the data. It -// returns the data after the end of the macaroon. -func (m *Macaroon) parseBinaryV2(data []byte) ([]byte, error) { - // The version has already been checked, so - // skip it. 
- data = data[1:] - - data, section, err := parseSectionV2(data) - if err != nil { - return nil, err - } - var loc string - if len(section) > 0 && section[0].fieldType == fieldLocation { - loc = string(section[0].data) - section = section[1:] - } - if len(section) != 1 || section[0].fieldType != fieldIdentifier { - return nil, fmt.Errorf("invalid macaroon header") - } - id := section[0].data - m.init(id, loc, V2) - for { - rest, section, err := parseSectionV2(data) - if err != nil { - return nil, err - } - data = rest - if len(section) == 0 { - break - } - var cav Caveat - if len(section) > 0 && section[0].fieldType == fieldLocation { - cav.Location = string(section[0].data) - section = section[1:] - } - if len(section) == 0 || section[0].fieldType != fieldIdentifier { - return nil, fmt.Errorf("no identifier in caveat") - } - cav.Id = section[0].data - section = section[1:] - if len(section) == 0 { - // First party caveat. - if cav.Location != "" { - return nil, fmt.Errorf("location not allowed in first party caveat") - } - m.caveats = append(m.caveats, cav) - continue - } - if len(section) != 1 { - return nil, fmt.Errorf("extra fields found in caveat") - } - if section[0].fieldType != fieldVerificationId { - return nil, fmt.Errorf("invalid field found in caveat") - } - cav.VerificationId = section[0].data - m.caveats = append(m.caveats, cav) - } - data, sig, err := parsePacketV2(data) - if err != nil { - return nil, err - } - if sig.fieldType != fieldSignature { - return nil, fmt.Errorf("unexpected field found instead of signature") - } - if len(sig.data) != hashLen { - return nil, fmt.Errorf("signature has unexpected length") - } - copy(m.sig[:], sig.data) - return data, nil -} - -// appendBinaryV2 appends the binary-encoded macaroon -// in v2 format to data. -func (m *Macaroon) appendBinaryV2(data []byte) []byte { - // Version byte. 
- data = append(data, 2) - if len(m.location) > 0 { - data = appendPacketV2(data, packetV2{ - fieldType: fieldLocation, - data: []byte(m.location), - }) - } - data = appendPacketV2(data, packetV2{ - fieldType: fieldIdentifier, - data: m.id, - }) - data = appendEOSV2(data) - for _, cav := range m.caveats { - if len(cav.Location) > 0 { - data = appendPacketV2(data, packetV2{ - fieldType: fieldLocation, - data: []byte(cav.Location), - }) - } - data = appendPacketV2(data, packetV2{ - fieldType: fieldIdentifier, - data: cav.Id, - }) - if len(cav.VerificationId) > 0 { - data = appendPacketV2(data, packetV2{ - fieldType: fieldVerificationId, - data: []byte(cav.VerificationId), - }) - } - data = appendEOSV2(data) - } - data = appendEOSV2(data) - data = appendPacketV2(data, packetV2{ - fieldType: fieldSignature, - data: m.sig[:], - }) - return data -} diff --git a/vendor/gopkg.in/macaroon.v2/marshal.go b/vendor/gopkg.in/macaroon.v2/marshal.go deleted file mode 100644 index 7bfb99bb..00000000 --- a/vendor/gopkg.in/macaroon.v2/marshal.go +++ /dev/null @@ -1,239 +0,0 @@ -package macaroon - -import ( - "encoding/base64" - "encoding/json" - "fmt" -) - -// Version specifies the version of a macaroon. -// In version 1, the macaroon id and all caveats -// must be UTF-8-compatible strings, and the -// size of any part of the macaroon may not exceed -// approximately 64K. In version 2, -// all field may be arbitrary binary blobs. -type Version uint16 - -const ( - // V1 specifies version 1 macaroons. - V1 Version = 1 - - // V2 specifies version 2 macaroons. - V2 Version = 2 - - // LatestVersion holds the latest supported version. - LatestVersion = V2 -) - -// String returns a string representation of the version; -// for example V1 formats as "v1". -func (v Version) String() string { - return fmt.Sprintf("v%d", v) -} - -// Version returns the version of the macaroon. 
-func (m *Macaroon) Version() Version { - return m.version -} - -// MarshalJSON implements json.Marshaler by marshaling the -// macaroon in JSON format. The serialisation format is determined -// by the macaroon's version. -func (m *Macaroon) MarshalJSON() ([]byte, error) { - switch m.version { - case V1: - return m.marshalJSONV1() - case V2: - return m.marshalJSONV2() - default: - return nil, fmt.Errorf("unknown version %v", m.version) - } -} - -// UnmarshalJSON implements json.Unmarshaller by unmarshaling -// the given macaroon in JSON format. It accepts both V1 and V2 -// forms encoded forms, and also a base64-encoded JSON string -// containing the binary-marshaled macaroon. -// -// After unmarshaling, the macaroon's version will reflect -// the version that it was unmarshaled as. -func (m *Macaroon) UnmarshalJSON(data []byte) error { - if data[0] == '"' { - // It's a string, so it must be a base64-encoded binary form. - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - data, err := Base64Decode([]byte(s)) - if err != nil { - return err - } - if err := m.UnmarshalBinary(data); err != nil { - return err - } - return nil - } - // Not a string; try to unmarshal into both kinds of macaroon object. - // This assumes that neither format has any fields in common. - // For subsequent versions we may need to change this approach. 
- type MacaroonJSONV1 macaroonJSONV1 - type MacaroonJSONV2 macaroonJSONV2 - var both struct { - *MacaroonJSONV1 - *MacaroonJSONV2 - } - if err := json.Unmarshal(data, &both); err != nil { - return err - } - switch { - case both.MacaroonJSONV1 != nil && both.MacaroonJSONV2 != nil: - return fmt.Errorf("cannot determine macaroon encoding version") - case both.MacaroonJSONV1 != nil: - if err := m.initJSONV1((*macaroonJSONV1)(both.MacaroonJSONV1)); err != nil { - return err - } - m.version = V1 - case both.MacaroonJSONV2 != nil: - if err := m.initJSONV2((*macaroonJSONV2)(both.MacaroonJSONV2)); err != nil { - return err - } - m.version = V2 - default: - return fmt.Errorf("invalid JSON macaroon encoding") - } - return nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -// It accepts both V1 and V2 binary encodings. -func (m *Macaroon) UnmarshalBinary(data []byte) error { - // Copy the data to avoid retaining references to it - // in the internal data structures. - data = append([]byte(nil), data...) - _, err := m.parseBinary(data) - return err -} - -// parseBinary parses the macaroon in binary format -// from the given data and returns where the parsed data ends. -// -// It retains references to data. -func (m *Macaroon) parseBinary(data []byte) ([]byte, error) { - if len(data) == 0 { - return nil, fmt.Errorf("empty macaroon data") - } - v := data[0] - if v == 2 { - // Version 2 binary format. 
- data, err := m.parseBinaryV2(data) - if err != nil { - return nil, fmt.Errorf("unmarshal v2: %v", err) - } - m.version = V2 - return data, nil - } - if isASCIIHex(v) { - // It's a hex digit - version 1 binary format - data, err := m.parseBinaryV1(data) - if err != nil { - return nil, fmt.Errorf("unmarshal v1: %v", err) - } - m.version = V1 - return data, nil - } - return nil, fmt.Errorf("cannot determine data format of binary-encoded macaroon") -} - -// MarshalBinary implements encoding.BinaryMarshaler by -// formatting the macaroon according to the version specified -// by MarshalAs. -func (m *Macaroon) MarshalBinary() ([]byte, error) { - return m.appendBinary(nil) -} - -// appendBinary appends the binary-formatted macaroon to -// the given data, formatting it according to the macaroon's -// version. -func (m *Macaroon) appendBinary(data []byte) ([]byte, error) { - switch m.version { - case V1: - return m.appendBinaryV1(data) - case V2: - return m.appendBinaryV2(data), nil - default: - return nil, fmt.Errorf("bad macaroon version %v", m.version) - } -} - -// Slice defines a collection of macaroons. By convention, the -// first macaroon in the slice is a primary macaroon and the rest -// are discharges for its third party caveats. -type Slice []*Macaroon - -// MarshalBinary implements encoding.BinaryMarshaler. -func (s Slice) MarshalBinary() ([]byte, error) { - var data []byte - var err error - for _, m := range s { - data, err = m.appendBinary(data) - if err != nil { - return nil, fmt.Errorf("failed to marshal macaroon %q: %v", m.Id(), err) - } - } - return data, nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -// It accepts all known binary encodings for the data - all the -// embedded macaroons need not be encoded in the same format. -func (s *Slice) UnmarshalBinary(data []byte) error { - // Prevent the internal data structures from holding onto the - // slice by copying it first. - data = append([]byte(nil), data...) 
- *s = (*s)[:0] - for len(data) > 0 { - var m Macaroon - rest, err := m.parseBinary(data) - if err != nil { - return fmt.Errorf("cannot unmarshal macaroon: %v", err) - } - *s = append(*s, &m) - data = rest - } - return nil -} - -const ( - padded = 1 << iota - stdEncoding -) - -var codecs = [4]*base64.Encoding{ - 0: base64.RawURLEncoding, - padded: base64.URLEncoding, - stdEncoding: base64.RawStdEncoding, - stdEncoding | padded: base64.StdEncoding, -} - -// Base64Decode base64-decodes the given data. -// It accepts both standard and URL encodings, both -// padded and unpadded. -func Base64Decode(data []byte) ([]byte, error) { - encoding := 0 - if len(data) > 0 && data[len(data)-1] == '=' { - encoding |= padded - } - for _, b := range data { - if b == '/' || b == '+' { - encoding |= stdEncoding - break - } - } - codec := codecs[encoding] - buf := make([]byte, codec.DecodedLen(len(data))) - n, err := codec.Decode(buf, data) - if err == nil { - return buf[0:n], nil - } - return nil, err -} diff --git a/vendor/gopkg.in/macaroon.v2/packet-v1.go b/vendor/gopkg.in/macaroon.v2/packet-v1.go deleted file mode 100644 index 10e9f7b7..00000000 --- a/vendor/gopkg.in/macaroon.v2/packet-v1.go +++ /dev/null @@ -1,133 +0,0 @@ -package macaroon - -import ( - "bytes" - "fmt" -) - -// field names, as defined in libmacaroons -const ( - fieldNameLocation = "location" - fieldNameIdentifier = "identifier" - fieldNameSignature = "signature" - fieldNameCaveatId = "cid" - fieldNameVerificationId = "vid" - fieldNameCaveatLocation = "cl" -) - -// maxPacketV1Len is the maximum allowed length of a packet in the v1 macaroon -// serialization format. -const maxPacketV1Len = 0xffff - -// The original macaroon binary encoding is made from a sequence -// of "packets", each of which has a field name and some data. -// The encoding is: -// -// - four ascii hex digits holding the entire packet size (including -// the digits themselves). -// -// - the field name, followed by an ascii space. 
-// -// - the raw data -// -// - a newline (\n) character -// -// The packet struct below holds a reference into Macaroon.data. -type packetV1 struct { - // ftype holds the field name of the packet. - fieldName []byte - - // data holds the packet's data. - data []byte - - // len holds the total length in bytes - // of the packet, including any header. - totalLen int -} - -// parsePacket parses the packet at the start of the -// given data. -func parsePacketV1(data []byte) (packetV1, error) { - if len(data) < 6 { - return packetV1{}, fmt.Errorf("packet too short") - } - plen, ok := parseSizeV1(data) - if !ok { - return packetV1{}, fmt.Errorf("cannot parse size") - } - if plen > len(data) { - return packetV1{}, fmt.Errorf("packet size too big") - } - if plen < 4 { - return packetV1{}, fmt.Errorf("packet size too small") - } - data = data[4:plen] - i := bytes.IndexByte(data, ' ') - if i <= 0 { - return packetV1{}, fmt.Errorf("cannot parse field name") - } - fieldName := data[0:i] - if data[len(data)-1] != '\n' { - return packetV1{}, fmt.Errorf("no terminating newline found") - } - return packetV1{ - fieldName: fieldName, - data: data[i+1 : len(data)-1], - totalLen: plen, - }, nil -} - -// appendPacketV1 appends a packet with the given field name -// and data to the given buffer. If the field and data were -// too long to be encoded, it returns nil, false; otherwise -// it returns the appended buffer. -func appendPacketV1(buf []byte, field string, data []byte) ([]byte, bool) { - plen := packetV1Size(field, data) - if plen > maxPacketV1Len { - return nil, false - } - buf = appendSizeV1(buf, plen) - buf = append(buf, field...) - buf = append(buf, ' ') - buf = append(buf, data...) 
- buf = append(buf, '\n') - return buf, true -} - -func packetV1Size(field string, data []byte) int { - return 4 + len(field) + 1 + len(data) + 1 -} - -var hexDigits = []byte("0123456789abcdef") - -func appendSizeV1(data []byte, size int) []byte { - return append(data, - hexDigits[size>>12], - hexDigits[(size>>8)&0xf], - hexDigits[(size>>4)&0xf], - hexDigits[size&0xf], - ) -} - -func parseSizeV1(data []byte) (int, bool) { - d0, ok0 := asciiHex(data[0]) - d1, ok1 := asciiHex(data[1]) - d2, ok2 := asciiHex(data[2]) - d3, ok3 := asciiHex(data[3]) - return d0<<12 + d1<<8 + d2<<4 + d3, ok0 && ok1 && ok2 && ok3 -} - -func asciiHex(b byte) (int, bool) { - switch { - case b >= '0' && b <= '9': - return int(b) - '0', true - case b >= 'a' && b <= 'f': - return int(b) - 'a' + 0xa, true - } - return 0, false -} - -func isASCIIHex(b byte) bool { - _, ok := asciiHex(b) - return ok -} diff --git a/vendor/gopkg.in/macaroon.v2/packet-v2.go b/vendor/gopkg.in/macaroon.v2/packet-v2.go deleted file mode 100644 index 9725dc38..00000000 --- a/vendor/gopkg.in/macaroon.v2/packet-v2.go +++ /dev/null @@ -1,117 +0,0 @@ -package macaroon - -import ( - "encoding/binary" - "fmt" -) - -type fieldType int - -// Field constants as used in the binary encoding. -const ( - fieldEOS fieldType = 0 - fieldLocation fieldType = 1 - fieldIdentifier fieldType = 2 - fieldVerificationId fieldType = 4 - fieldSignature fieldType = 6 -) - -type packetV2 struct { - // fieldType holds the type of the field. - fieldType fieldType - - // data holds the packet's data. - data []byte -} - -// parseSectionV2 parses a sequence of packets -// in data. The sequence is terminated by a packet -// with a field type of fieldEOS. 
-func parseSectionV2(data []byte) ([]byte, []packetV2, error) { - prevFieldType := fieldType(-1) - var packets []packetV2 - for { - if len(data) == 0 { - return nil, nil, fmt.Errorf("section extends past end of buffer") - } - rest, p, err := parsePacketV2(data) - if err != nil { - return nil, nil, err - } - if p.fieldType == fieldEOS { - return rest, packets, nil - } - if p.fieldType <= prevFieldType { - return nil, nil, fmt.Errorf("fields out of order") - } - packets = append(packets, p) - prevFieldType = p.fieldType - data = rest - } -} - -// parsePacketV2 parses a V2 data package at the start -// of the given data. -// The format of a packet is as follows: -// -// fieldType(varint) payloadLen(varint) data[payloadLen bytes] -// -// apart from fieldEOS which has no payloadLen or data (it's -// a single zero byte). -func parsePacketV2(data []byte) ([]byte, packetV2, error) { - data, ft, err := parseVarint(data) - if err != nil { - return nil, packetV2{}, err - } - p := packetV2{ - fieldType: fieldType(ft), - } - if p.fieldType == fieldEOS { - return data, p, nil - } - data, payloadLen, err := parseVarint(data) - if err != nil { - return nil, packetV2{}, err - } - if payloadLen > len(data) { - return nil, packetV2{}, fmt.Errorf("field data extends past end of buffer") - } - p.data = data[0:payloadLen] - return data[payloadLen:], p, nil -} - -// parseVarint parses the variable-length integer -// at the start of the given data and returns rest -// of the buffer and the number. 
-func parseVarint(data []byte) ([]byte, int, error) { - val, n := binary.Uvarint(data) - if n > 0 { - if val > 0x7fffffff { - return nil, 0, fmt.Errorf("varint value out of range") - } - return data[n:], int(val), nil - } - if n == 0 { - return nil, 0, fmt.Errorf("varint value extends past end of buffer") - } - return nil, 0, fmt.Errorf("varint value out of range") -} - -func appendPacketV2(data []byte, p packetV2) []byte { - data = appendVarint(data, int(p.fieldType)) - if p.fieldType != fieldEOS { - data = appendVarint(data, len(p.data)) - data = append(data, p.data...) - } - return data -} - -func appendEOSV2(data []byte) []byte { - return append(data, 0) -} - -func appendVarint(data []byte, x int) []byte { - var buf [binary.MaxVarintLen32]byte - n := binary.PutUvarint(buf[:], uint64(x)) - return append(data, buf[:n]...) -} diff --git a/vendor/gopkg.in/macaroon.v2/trace.go b/vendor/gopkg.in/macaroon.v2/trace.go deleted file mode 100644 index 581a998b..00000000 --- a/vendor/gopkg.in/macaroon.v2/trace.go +++ /dev/null @@ -1,102 +0,0 @@ -package macaroon - -import ( - "fmt" -) - -// Trace holds all toperations involved in verifying a macaroon, -// and the root key used as the initial verification key. -// This can be useful for debugging macaroon implementations. -type Trace struct { - RootKey []byte - Ops []TraceOp -} - -// Results returns the output from all operations in the Trace. -// The result from ts.Ops[i] will be in the i'th element of the -// returned slice. -// When a trace has resulted in a failure, the -// last element will be nil. -func (t Trace) Results() [][]byte { - r := make([][]byte, len(t.Ops)) - input := t.RootKey - for i, op := range t.Ops { - input = op.Result(input) - r[i] = input - } - return r -} - -// TraceOp holds one possible operation when verifying a macaroon. 
-type TraceOp struct { - Kind TraceOpKind `json:"kind"` - Data1 []byte `json:"data1,omitempty"` - Data2 []byte `json:"data2,omitempty"` -} - -// Result returns the result of computing the given -// operation with the given input data. -// If op is TraceFail, it returns nil. -func (op TraceOp) Result(input []byte) []byte { - switch op.Kind { - case TraceMakeKey: - return makeKey(input)[:] - case TraceHash: - if len(op.Data2) == 0 { - return keyedHash(bytesToKey(input), op.Data1)[:] - } - return keyedHash2(bytesToKey(input), op.Data1, op.Data2)[:] - case TraceBind: - return bindForRequest(op.Data1, bytesToKey(input))[:] - case TraceFail: - return nil - default: - panic(fmt.Errorf("unknown trace operation kind %d", op.Kind)) - } -} - -func bytesToKey(data []byte) *[keyLen]byte { - var key [keyLen]byte - if len(data) != keyLen { - panic(fmt.Errorf("unexpected input key length; got %d want %d", len(data), keyLen)) - } - copy(key[:], data) - return &key -} - -// TraceOpKind represents the kind of a macaroon verification operation. -type TraceOpKind int - -const ( - TraceInvalid = TraceOpKind(iota) - - // TraceMakeKey represents the operation of calculating a - // fixed length root key from the variable length input key. - TraceMakeKey - - // TraceHash represents a keyed hash operation with one - // or two values. If there is only one value, it will be in Data1. - TraceHash - - // TraceBind represents the operation of binding a discharge macaroon - // to its primary macaroon. Data1 holds the signature of the primary - // macaroon. - TraceBind - - // TraceFail represents a verification failure. If present, this will always - // be the last operation in a trace. - TraceFail -) - -var traceOps = []string{ - TraceInvalid: "invalid", - TraceMakeKey: "makekey", - TraceHash: "hash", - TraceBind: "bind", - TraceFail: "fail", -} - -// String returns a string representation of the operation. 
-func (k TraceOpKind) String() string { - return traceOps[k] -} diff --git a/vendor/modules.txt b/vendor/modules.txt index e2af7de0..acf00073 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -16,7 +16,6 @@ github.com/cespare/xxhash/v2 github.com/chzyer/readline # github.com/cloudbase/garm-provider-common v0.1.1-0.20231012061429-49001794e700 ## explicit; go 1.20 -github.com/cloudbase/garm-provider-common/cloudconfig github.com/cloudbase/garm-provider-common/defaults github.com/cloudbase/garm-provider-common/errors github.com/cloudbase/garm-provider-common/execution @@ -29,11 +28,6 @@ github.com/davecgh/go-spew/spew # github.com/felixge/httpsnoop v1.0.3 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 -## explicit; go 1.14 -github.com/flosch/pongo2 -# github.com/frankban/quicktest v1.14.3 -## explicit; go 1.13 # github.com/go-logr/logr v1.2.3 ## explicit; go 1.16 github.com/go-logr/logr @@ -41,15 +35,6 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr -# github.com/go-macaroon-bakery/macaroon-bakery/v3 v3.0.1 -## explicit; go 1.17 -github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery -github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers -github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery -github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil -# github.com/go-macaroon-bakery/macaroonpb v1.0.0 -## explicit; go 1.16 -github.com/go-macaroon-bakery/macaroonpb # github.com/go-openapi/analysis v0.21.4 ## explicit; go 1.13 github.com/go-openapi/analysis @@ -145,42 +130,15 @@ github.com/juju/clock # github.com/juju/errors v1.0.0 ## explicit; go 1.18 github.com/juju/errors +# github.com/juju/loggo v1.0.0 +## explicit; go 1.14 # github.com/juju/retry v1.0.0 ## explicit; go 1.17 github.com/juju/retry # github.com/juju/testing v1.0.2 ## explicit; go 1.17 -# github.com/juju/webbrowser v1.0.0 -## explicit; go 
1.11 -github.com/juju/webbrowser -# github.com/julienschmidt/httprouter v1.3.0 -## explicit; go 1.7 -github.com/julienschmidt/httprouter -# github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 -## explicit -github.com/kballard/go-shellquote -# github.com/kr/fs v0.1.0 -## explicit -github.com/kr/fs # github.com/kr/pretty v0.3.1 ## explicit; go 1.12 -# github.com/lxc/lxd v0.0.0-20230325180147-8d608287b0ce -## explicit; go 1.18 -github.com/lxc/lxd/client -github.com/lxc/lxd/lxd/device/config -github.com/lxc/lxd/lxd/instance/instancetype -github.com/lxc/lxd/lxd/revert -github.com/lxc/lxd/shared -github.com/lxc/lxd/shared/api -github.com/lxc/lxd/shared/cancel -github.com/lxc/lxd/shared/ioprogress -github.com/lxc/lxd/shared/logger -github.com/lxc/lxd/shared/osarch -github.com/lxc/lxd/shared/simplestreams -github.com/lxc/lxd/shared/tcp -github.com/lxc/lxd/shared/termios -github.com/lxc/lxd/shared/units -github.com/lxc/lxd/shared/validate # github.com/mailru/easyjson v0.7.7 ## explicit; go 1.12 github.com/mailru/easyjson/buffer @@ -228,19 +186,9 @@ github.com/oklog/ulid github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log -# github.com/pborman/uuid v1.2.1 -## explicit -github.com/pborman/uuid # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/pkg/sftp v1.13.5 -## explicit; go 1.15 -github.com/pkg/sftp -github.com/pkg/sftp/internal/encoding/ssh/filexfer -# github.com/pkg/xattr v0.4.9 -## explicit; go 1.14 -github.com/pkg/xattr # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib @@ -265,17 +213,6 @@ github.com/prometheus/procfs/internal/util # github.com/rivo/uniseg v0.4.4 ## explicit; go 1.18 github.com/rivo/uniseg -# github.com/robfig/cron/v3 v3.0.1 -## explicit; go 1.12 -github.com/robfig/cron/v3 -# github.com/rogpeppe/fastuuid v1.2.0 -## explicit; go 1.12 -github.com/rogpeppe/fastuuid -# 
github.com/sirupsen/logrus v1.9.0 -## explicit; go 1.13 -github.com/sirupsen/logrus -github.com/sirupsen/logrus/hooks/syslog -github.com/sirupsen/logrus/hooks/writer # github.com/spf13/cobra v1.7.1-0.20230723113155-fd865a44e3c4 ## explicit; go 1.15 github.com/spf13/cobra @@ -322,28 +259,15 @@ go.opentelemetry.io/otel/trace # golang.org/x/crypto v0.12.0 ## explicit; go 1.17 golang.org/x/crypto/bcrypt -golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20poly1305 -golang.org/x/crypto/curve25519 -golang.org/x/crypto/curve25519/internal/field -golang.org/x/crypto/ed25519 golang.org/x/crypto/hkdf golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 -golang.org/x/crypto/nacl/box -golang.org/x/crypto/nacl/secretbox -golang.org/x/crypto/salsa20/salsa -golang.org/x/crypto/ssh -golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -golang.org/x/crypto/ssh/terminal # golang.org/x/net v0.14.0 ## explicit; go 1.17 golang.org/x/net/context -golang.org/x/net/html -golang.org/x/net/html/atom -golang.org/x/net/publicsuffix # golang.org/x/oauth2 v0.11.0 ## explicit; go 1.18 golang.org/x/oauth2 @@ -355,12 +279,8 @@ golang.org/x/sync/errgroup ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader -golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.11.0 -## explicit; go 1.17 -golang.org/x/term # google.golang.org/appengine v1.6.7 ## explicit; go 1.11 google.golang.org/appengine/internal @@ -403,15 +323,6 @@ google.golang.org/protobuf/types/known/timestamppb # gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 ## explicit gopkg.in/DATA-DOG/go-sqlmock.v1 -# gopkg.in/errgo.v1 v1.0.1 -## explicit -gopkg.in/errgo.v1 -# gopkg.in/httprequest.v1 v1.2.1 -## explicit; go 1.15 -gopkg.in/httprequest.v1 -# gopkg.in/macaroon.v2 v2.1.0 -## explicit -gopkg.in/macaroon.v2 # gopkg.in/natefinch/lumberjack.v2 v2.2.1 ## explicit; go 1.13 gopkg.in/natefinch/lumberjack.v2 From 
d1d8bfa703fa15a2c3a39e8a3e8a87a2920ce503 Mon Sep 17 00:00:00 2001 From: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com> Date: Mon, 18 Dec 2023 14:49:36 +0000 Subject: [PATCH 2/4] Update docs Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com> --- README.md | 16 ++---- doc/providers.md | 121 ++-------------------------------------------- doc/quickstart.md | 44 ++++++++++++----- 3 files changed, 41 insertions(+), 140 deletions(-) diff --git a/README.md b/README.md index 74188d1a..0430ecbf 100644 --- a/README.md +++ b/README.md @@ -30,17 +30,17 @@ Thanks to the efforts of the amazing folks at @mercedes-benz, GARM can now be in ## Supported providers -GARM has a built-in LXD provider that you can use out of the box to spin up runners on any machine that runs either a stand-alone LXD instance, or an LXD cluster. The quick start guide mentioned above will get you up and running with the LXD provider. - -GARM also supports external providers for a variety of other targets. +GARM uses providers to create runners in a particular IaaS. The providers are external executables that GARM calls into to create runners. Before you can create runners, you'll need to install at least one provider. ## Installing external providers -External providers are binaries that GARM calls into to create runners in a particular IaaS. There are currently two external providers available: +External providers are binaries that GARM calls into to create runners in a particular IaaS. There are several external providers available: * [OpenStack](https://github.com/cloudbase/garm-provider-openstack) * [Azure](https://github.com/cloudbase/garm-provider-azure) * [Kubernetes](https://github.com/mercedes-benz/garm-provider-k8s) - Thanks to the amazing folks at @mercedes-benz for sharing their awesome provider! 
+* [LXD](https://github.com/cloudbase/garm-provider-lxd) +* [Incus](https://github.com/cloudbase/garm-provider-incus) Follow the instructions in the README of each provider to install them. @@ -62,10 +62,4 @@ If you would like to optimize the startup time of new instance, take a look at t ## Write your own provider -The providers are interfaces between ```GARM``` and a particular IaaS in which we spin up GitHub Runners. These providers can be either **native** or **external**. The **native** providers are written in ```Go```, and must implement [the interface defined here](https://github.com/cloudbase/garm/blob/main/runner/common/provider.go#L22-L39). **External** providers can be written in any language, as they are in the form of an external executable that ```GARM``` calls into. - -There is currently one **native** provider for [LXD](https://linuxcontainers.org/lxd/) and several **external** providers linked above. - -If you want to write your own provider, you can choose to write a native one, or implement an **external** one. I encourage you to opt for an **external** provider, as those are the easiest to write and you don't need to merge it in GARM itself to be able to use. Faster to write, faster to iterate. The LXD provider may at some point be split from GARM into it's own external project, at which point we will remove the native provider interface and only support external providers. - -Please see the [Writing an external provider](/doc/external_provider.md) document for details. Also, feel free to inspect the two available sample external providers in this repository. +The providers are interfaces between ```GARM``` and a particular IaaS in which we spin up GitHub Runners. **External** providers can be written in any language, as they are in the form of an external executable that ```GARM``` calls into. Please see the [Writing an external provider](/doc/external_provider.md) document for details. 
Also, feel free to inspect the two available sample external providers in this repository. diff --git a/doc/providers.md b/doc/providers.md index 7b7d686b..d1f891f0 100644 --- a/doc/providers.md +++ b/doc/providers.md @@ -1,126 +1,10 @@ # Provider configuration -GARM was designed to be extensible. Providers can be written either as built-in plugins or as external executables. The built-in plugins are written in Go, and they are compiled into the ```GARM``` binary. External providers are executables that implement the needed interface to create/delete/list compute systems that are used by ```GARM``` to create runners. +GARM was designed to be extensible. Providers can be written as external executables. External providers are executables that implement the needed interface to create/delete/list compute systems that are used by ```GARM``` to create runners. -GARM currently ships with one built-in provider for [LXD](https://linuxcontainers.org/lxd/introduction/) and the external provider interface which allows you to write your own provider in any language you want. - -- [LXD provider](#lxd-provider) - - [LXD remotes](#lxd-remotes) - - [LXD Security considerations](#lxd-security-considerations) - [External provider](#external-provider) - [Available external providers](#available-external-providers) -## LXD provider - -GARM leverages LXD to create the runners. Here is a sample config section for an LXD provider: - -```toml -# Currently, providers are defined statically in the config. This is due to the fact -# that we have not yet added support for storing secrets in something like Barbican -# or Vault. This will change in the future. However, for now, it's important to remember -# that once you create a pool using one of the providers defined here, the name of that -# provider must not be changed, or the pool will no longer work. Make sure you remove any -# pools before removing or changing a provider. -[[provider]] - # An arbitrary string describing this provider. 
- name = "lxd_local" - # Provider type. GARM is designed to allow creating providers which are used to spin - # up compute resources, which in turn will run the github runner software. - # Currently, LXD is the only supprted provider, but more will be written in the future. - provider_type = "lxd" - # A short description of this provider. The name, description and provider types will - # be included in the information returned by the API when listing available providers. - description = "Local LXD installation" - [provider.lxd] - # the path to the unix socket that LXD is listening on. This works if GARM and LXD - # are on the same system, and this option takes precedence over the "url" option, - # which connects over the network. - unix_socket_path = "/var/snap/lxd/common/lxd/unix.socket" - # When defining a pool for a repository or an organization, you have an option to - # specify a "flavor". In LXD terms, this translates to "profiles". Profiles allow - # you to customize your instances (memory, cpu, disks, nics, etc). - # This option allows you to inject the "default" profile along with the profile selected - # by the flavor. - include_default_profile = false - # instance_type defines the type of instances this provider will create. - # - # Options are: - # - # * virtual-machine (default) - # * container - # - instance_type = "container" - # enable/disable secure boot. If the image you select for the pool does not have a - # signed bootloader, set this to false, otherwise your instances won't boot. - secure_boot = false - # Project name to use. You can create a separate project in LXD for runners. - project_name = "default" - # URL is the address on which LXD listens for connections (ex: https://example.com:8443) - url = "" - # GARM supports certificate authentication for LXD remote connections. The easiest way - # to get the needed certificates, is to install the lxc client and add a remote. 
The - # client_certificate, client_key and tls_server_certificate can be then fetched from - # $HOME/snap/lxd/common/config. - client_certificate = "" - client_key = "" - tls_server_certificate = "" - [provider.lxd.image_remotes] - # Image remotes are important. These are the default remotes used by lxc. The names - # of these remotes are important. When specifying an "image" for the pool, that image - # can be a hash of an existing image on your local LXD installation or it can be a - # remote image from one of these remotes. You can specify the images as follows: - # Example: - # - # * ubuntu:20.04 - # * ubuntu_daily:20.04 - # * images:centos/8/cloud - # - # Ubuntu images come pre-installed with cloud-init which we use to set up the runner - # automatically and customize the runner. For non Ubuntu images, you need to use the - # variant that has "/cloud" in the name. Those images come with cloud-init. - [provider.lxd.image_remotes.ubuntu] - addr = "https://cloud-images.ubuntu.com/releases" - public = true - protocol = "simplestreams" - skip_verify = false - [provider.lxd.image_remotes.ubuntu_daily] - addr = "https://cloud-images.ubuntu.com/daily" - public = true - protocol = "simplestreams" - skip_verify = false - [provider.lxd.image_remotes.images] - addr = "https://images.linuxcontainers.org" - public = true - protocol = "simplestreams" - skip_verify = false -``` - -You can choose to connect to a local LXD server by using the ```unix_socket_path``` option, or you can connect to a remote LXD cluster/server by using the ```url``` option. If both are specified, the unix socket takes precedence. The config file is fairly well commented, but I will add a note about remotes. - -### LXD remotes - -By default, GARM does not load any image remotes. You get to choose which remotes you add (if any). An image remote is a repository of images that LXD uses to create new instances, either virtual machines or containers. 
In the absence of any remote, GARM will attempt to find the image you configure for a pool of runners, on the LXD server we're connecting to. If one is present, it will be used, otherwise it will fail and you will need to configure a remote. - -The sample config file in this repository has the usual default ```LXD``` remotes: - -* <https://cloud-images.ubuntu.com/releases> (ubuntu) - Official Ubuntu images -* <https://cloud-images.ubuntu.com/daily> (ubuntu_daily) - Official Ubuntu images, daily build -* <https://images.linuxcontainers.org> (images) - Community maintained images for various operating systems - -When creating a new pool, you'll be able to specify which image you want to use. The images are referenced by ```remote_name:image_tag```. For example, if you want to launch a runner on an Ubuntu 20.04, the image name would be ```ubuntu:20.04```. For a daily image it would be ```ubuntu_daily:20.04```. And for one of the unofficial images it would be ```images:centos/8-Stream/cloud```. Note, for unofficial images you need to use the tags that have ```/cloud``` in the name. These images come pre-installed with ```cloud-init``` which we need to set up the runners automatically. - -You can also create your own image remote, where you can host your own custom images. If you want to build your own images, have a look at [distrobuilder](https://github.com/lxc/distrobuilder). - -Image remotes in the ```GARM``` config, is a map of strings to remote settings. The name of the remote is the last bit of string in the section header. For example, the following section ```[provider.lxd.image_remotes.ubuntu_daily]```, defines the image remote named **ubuntu_daily**. Use this name to reference images inside that remote. - -You can also use locally uploaded images. Check out the [performance considerations](./performance_considerations.md) page for details on how to customize local images and use them with GARM. 
- -### LXD Security considerations - -GARM does not apply any ACLs of any kind to the instances it creates. That task remains in the responsibility of the user. [Here is a guide for creating ACLs in LXD](https://linuxcontainers.org/lxd/docs/master/howto/network_acls/). You can of course use ```iptables``` or ```nftables``` to create any rules you wish. I recommend you create a separate isolated lxd bridge for runners, and secure it using ACLs/iptables/nftables. - -You must make sure that the code that runs as part of the workflows is trusted, and if that cannot be done, you must make sure that any malicious code that will be pulled in by the actions and run as part of a workload, is as contained as possible. There is a nice article about [securing your workflow runs here](https://blog.gitguardian.com/github-actions-security-cheat-sheet/). - ## External provider The external provider is a special kind of provider. It delegates the functionality needed to create the runners to external executables. These executables can be either binaries or scripts. As long as they adhere to the needed interface, they can be used to create runners in any target IaaS. This is identical to what ```containerd``` does with ```CNIs```. @@ -163,6 +47,9 @@ For non testing purposes, there are two external providers currently available: * [OpenStack](https://github.com/cloudbase/garm-provider-openstack) * [Azure](https://github.com/cloudbase/garm-provider-azure) +* [Kubernetes](https://github.com/mercedes-benz/garm-provider-k8s) - Thanks to the amazing folks at @mercedes-benz for sharing their awesome provider! +* [LXD](https://github.com/cloudbase/garm-provider-lxd) +* [Incus](https://github.com/cloudbase/garm-provider-incus) Details on how to install and configure them are available in their respective repositories. 
diff --git a/doc/quickstart.md b/doc/quickstart.md index 9bf6587c..9f90e960 100644 --- a/doc/quickstart.md +++ b/doc/quickstart.md @@ -96,23 +96,31 @@ At this point, we have a valid config file, but we still need to add `provider` This is where you have a decision to make. GARM has a number of providers you can leverage. At the time of this writing, we have support for: -* LXD -* Azure -* OpenStack +* [OpenStack](https://github.com/cloudbase/garm-provider-openstack) +* [Azure](https://github.com/cloudbase/garm-provider-azure) +* [Kubernetes](https://github.com/mercedes-benz/garm-provider-k8s) - Thanks to the amazing folks at @mercedes-benz for sharing their awesome provider! +* [LXD](https://github.com/cloudbase/garm-provider-lxd) +* [Incus](https://github.com/cloudbase/garm-provider-incus) -The LXD provider is built into GARM itself and has no external requirements. The [Azure](https://github.com/cloudbase/garm-provider-azure) and [OpenStack](https://github.com/cloudbase/garm-provider-openstack) ones are `external` providers in the form of an executable that GARM calls into. +All currently available providers are `external`. -Both the LXD and the external provider configs are [documented in a separate doc](./providers.md). - -The easiest provider to set up is probably the LXD provider. You don't need an account on an external cloud. You can just use your machine. +The easiest provider to set up is probably the LXD or Incus provider. Incus is a fork of LXD so the functionality is identical (for now). For the purpose of this document, we'll continue with LXD. You don't need an account on an external cloud. You can just use your machine. You will need to have LXD installed and configured. There is an excellent [getting started guide](https://documentation.ubuntu.com/lxd/en/latest/getting_started/) for LXD. Follow the instructions there to install and configure LXD, then come back here. 
-Once you have LXD installed and configured, you can add the provider section to your config file. If you're connecting to the `local` LXD installation, the [config snippet for the LXD provider](./providers.md#lxd-provider) will work out of the box. We'll be connecting using the unix socket so no further configuration will be needed. +Once you have LXD installed and configured, you can add the provider section to your config file. If you're connecting to the `local` LXD installation, the [config snippet for the LXD provider](https://github.com/cloudbase/garm-provider-lxd/blob/main/testdata/garm-provider-lxd.toml) will work out of the box. We'll be connecting using the unix socket so no further configuration will be needed. -Go ahead and copy and paste that entire snippet in your GARM config file (`/etc/garm/config.toml`). +Go ahead and create a new config somewhere where GARM can access it and paste that entire snippet. For the purposes of this doc, we'll assume you created a new file called `/etc/garm/garm-provider-lxd.toml`. Now we need to define the external provider config in `/etc/garm/config.toml`: -You can also use an external provider instead of LXD. You will need to define the provider section in your config file and point it to the executable and the provider config file. The [config snippet for the external provider](./providers.md#external-provider) gives you an example of how that can be done. Configuring the external provider is outside the scope of this guide. You will need to consult the documentation for the external provider you want to use. 
+```toml +[[provider]] + name = "lxd_local" + provider_type = "external" + description = "Local LXD installation" + [provider.external] + provider_executable = "/opt/garm/providers.d/garm-provider-lxd" + config_file = "/etc/garm/garm-provider-lxd.toml" +``` ## The credentials section @@ -154,7 +162,7 @@ docker run -d \ -p 80:80 \ -v /etc/garm:/etc/garm:rw \ -v /var/snap/lxd/common/lxd/unix.socket:/var/snap/lxd/common/lxd/unix.socket:rw \ - ghcr.io/cloudbase/garm:v0.1.3 + ghcr.io/cloudbase/garm:v0.1.4 ``` You will notice we also mounted the LXD unix socket from the host inside the container where the config you pasted expects to find it. If you plan to use an external provider that does not need to connect to LXD over a unix socket, feel free to remove that mount. @@ -187,7 +195,7 @@ Adding the `garm` user to the LXD group will allow it to connect to the LXD unix Next, download the latest release from the [releases page](https://github.com/cloudbase/garm/releases). ```bash -wget -q -O - https://github.com/cloudbase/garm/releases/download/v0.1.3/garm-linux-amd64.tgz | tar xzf - -C /usr/local/bin/ +wget -q -O - https://github.com/cloudbase/garm/releases/download/v0.1.4/garm-linux-amd64.tgz | tar xzf - -C /usr/local/bin/ ``` We'll be running under an unprivileged user. If we want to be able to listen on any port under `1024`, we'll have to set some capabilities on the binary: @@ -196,6 +204,18 @@ We'll be running under an unprivileged user. 
If we want to be able to listen on setcap cap_net_bind_service=+ep /usr/local/bin/garm ``` +Create a folder for the external providers: + +```bash +sudo mkdir -p /opt/garm/providers.d +``` + +Download the LXD provider binary: + +```bash +wget -q -O - https://github.com/cloudbase/garm-provider-lxd/releases/download/v0.1.0/garm-linux-amd64.tgz | sudo tar xzf - -C /opt/garm/providers.d/ +``` + Change the permissions on the config dir: ```bash From c4b2a3cd1fa2cd526ab667f52470a38a4aeaa3b4 Mon Sep 17 00:00:00 2001 From: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com> Date: Mon, 18 Dec 2023 14:55:56 +0000 Subject: [PATCH 3/4] Update Dockerfile Add new providers to Dockerfile: * k8s * lxd * incus Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com> --- Dockerfile | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/Dockerfile b/Dockerfile index 2972344d..23fbece4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,9 @@ ADD . /build/garm RUN cd /build/garm && git checkout ${GARM_REF} RUN git clone https://github.com/cloudbase/garm-provider-azure /build/garm-provider-azure RUN git clone https://github.com/cloudbase/garm-provider-openstack /build/garm-provider-openstack +RUN git clone https://github.com/cloudbase/garm-provider-lxd /build/garm-provider-lxd +RUN git clone https://github.com/cloudbase/garm-provider-incus /build/garm-provider-incus +RUN git clone https://github.com/mercedes-benz/garm-provider-k8s /build/garm-provider-k8s RUN cd /build/garm && go build -o /bin/garm \ -tags osusergo,netgo,sqlite_omit_load_extension \ @@ -18,11 +21,17 @@ RUN cd /build/garm && go build -o /bin/garm \ RUN mkdir -p /opt/garm/providers.d RUN cd /build/garm-provider-azure && go build -ldflags="-extldflags '-static' -s -w" -o /opt/garm/providers.d/garm-provider-azure . RUN cd /build/garm-provider-openstack && go build -ldflags="-extldflags '-static' -s -w" -o /opt/garm/providers.d/garm-provider-openstack . 
+RUN cd /build/garm-provider-lxd && go build -ldflags="-extldflags '-static' -s -w" -o /opt/garm/providers.d/garm-provider-lxd . +RUN cd /build/garm-provider-incus && go build -ldflags="-extldflags '-static' -s -w" -o /opt/garm/providers.d/garm-provider-incus . +RUN cd /build/garm-provider-k8s && go build -ldflags="-extldflags '-static' -s -w" -o /opt/garm/providers.d/garm-provider-k8s . FROM scratch COPY --from=builder /bin/garm /bin/garm COPY --from=builder /opt/garm/providers.d/garm-provider-openstack /opt/garm/providers.d/garm-provider-openstack +COPY --from=builder /opt/garm/providers.d/garm-provider-lxd /opt/garm/providers.d/garm-provider-lxd +COPY --from=builder /opt/garm/providers.d/garm-provider-incus /opt/garm/providers.d/garm-provider-incus +COPY --from=builder /opt/garm/providers.d/garm-provider-k8s /opt/garm/providers.d/garm-provider-k8s COPY --from=builder /opt/garm/providers.d/garm-provider-azure /opt/garm/providers.d/garm-provider-azure COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ From ff5b9d22a781c7ce1c8ddd28bcd6234189be8db2 Mon Sep 17 00:00:00 2001 From: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com> Date: Mon, 18 Dec 2023 15:24:52 +0000 Subject: [PATCH 4/4] Fix k8s path Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 23fbece4..7f81a80e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,7 +23,7 @@ RUN cd /build/garm-provider-azure && go build -ldflags="-extldflags '-static' -s RUN cd /build/garm-provider-openstack && go build -ldflags="-extldflags '-static' -s -w" -o /opt/garm/providers.d/garm-provider-openstack . RUN cd /build/garm-provider-lxd && go build -ldflags="-extldflags '-static' -s -w" -o /opt/garm/providers.d/garm-provider-lxd . RUN cd /build/garm-provider-incus && go build -ldflags="-extldflags '-static' -s -w" -o /opt/garm/providers.d/garm-provider-incus . 
-RUN cd /build/garm-provider-k8s && go build -ldflags="-extldflags '-static' -s -w" -o /opt/garm/providers.d/garm-provider-k8s . +RUN cd /build/garm-provider-k8s/cmd/garm-provider-k8s && go build -ldflags="-extldflags '-static' -s -w" -o /opt/garm/providers.d/garm-provider-k8s . FROM scratch