diff --git a/go.mod b/go.mod index 8dca97001d2..6df421a6bc3 100644 --- a/go.mod +++ b/go.mod @@ -18,14 +18,14 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/opencontainers/image-spec v1.1.0-rc3 github.com/pkg/errors v0.9.1 - github.com/sigstore/sigstore v1.6.4 + github.com/sigstore/sigstore v1.6.5 github.com/spiffe/go-spiffe/v2 v2.1.5 github.com/spiffe/spire-api-sdk v1.6.3 github.com/tektoncd/plumbing v0.0.0-20220817140952-3da8ce01aeeb go.opencensus.io v0.24.0 go.uber.org/zap v1.24.0 golang.org/x/exp v0.0.0-20230307190834-24139beb5833 - golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/oauth2 v0.8.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 gopkg.in/square/go-jose.v2 v2.6.0 k8s.io/api v0.25.9 @@ -44,6 +44,10 @@ require ( code.gitea.io/sdk/gitea v0.15.1 github.com/goccy/kpoward v0.1.0 github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20221030203717-1711cefd7eec + github.com/sigstore/sigstore/pkg/signature/kms/aws v1.6.5 + github.com/sigstore/sigstore/pkg/signature/kms/azure v1.6.5 + github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.6.5 + github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.6.5 go.opentelemetry.io/otel v1.16.0 go.opentelemetry.io/otel/exporters/jaeger v1.16.0 go.opentelemetry.io/otel/sdk v1.16.0 @@ -68,8 +72,12 @@ require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v0.13.0 // indirect cloud.google.com/go/kms v1.10.1 // indirect - github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect - github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/aws/aws-sdk-go-v2/service/kms v1.21.1 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10 // indirect github.com/cenkalti/backoff/v3 v3.2.2 // indirect @@ -92,10 +100,14 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/vault/api v1.9.1 // indirect github.com/jellydator/ttlcache/v3 v3.0.1 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect + github.com/mattn/go-colorable v0.1.9 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect github.com/skeema/knownhosts v1.1.0 // indirect github.com/theupdateframework/go-tuf v0.5.2 // indirect @@ -192,9 +204,9 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/automaxprocs v1.4.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/crypto v0.8.0 // indirect + golang.org/x/crypto v0.9.0 // indirect golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.9.0 // indirect + golang.org/x/net v0.10.0 // indirect golang.org/x/sync v0.2.0 golang.org/x/sys v0.8.0 // indirect golang.org/x/term v0.8.0 // indirect diff --git a/go.sum b/go.sum index 4c816e4c07c..b41f2095f0f 100644 --- a/go.sum +++ b/go.sum @@ -59,6 
+59,16 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 h1:m/sWOGCREuSBqg2htVQTBY8nOZpyajYztF0vUvSZTuM= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0/go.mod h1:Pu5Zksi2KrU7LPbZbNINx6fuVrUp/ffvpxdDj+i8LeE= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= @@ -83,15 +93,13 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= -github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.2.1/go.mod 
h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -146,7 +154,7 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.44.257 h1:HwelXYZZ8c34uFFhgVw3ybu2gB5fkk8KLj2idTvzZb8= +github.com/aws/aws-sdk-go v1.44.248 h1:GvkxpgsxqNc03LmhXiaxKpzbyxndnex7V+OThLx4g5M= github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= github.com/aws/aws-sdk-go-v2 v1.18.0 h1:882kkTpSFhdgYRKVZ/VCgf7sd0ru57p2JCxz4/oN5RY= github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= @@ -376,6 +384,7 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v23.0.5+incompatible h1:ufWmAOuD3Vmr7JP2G5K3cyuNC4YZWiAsuDEvFVVDafE= github.com/docker/cli v23.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= @@ -653,7 +662,7 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= @@ -773,6 +782,8 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf h1:ndns1qx/5dL43g16EQkPV/i8+b3l5bYQwLeoSBe7tS8= github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -786,13 +797,13 @@ github.com/marstr/guid v1.1.0/go.mod 
h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHef github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -907,6 +918,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -988,8 +1001,16 @@ github.com/shurcooL/githubv4 v0.0.0-20190718010115-4ba037080260/go.mod h1:hAF0iL github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f h1:tygelZueB1EtXkPI6mQ4o9DQ0+FKW41hTbunoXZCTqk= github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f/go.mod h1:AuYgA5Kyo4c7HfUmvRGs/6rGlMMV/6B1bVnB9JxJEEg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sigstore/sigstore v1.6.4 h1:jH4AzR7qlEH/EWzm+opSpxCfuUcjHL+LJPuQE7h40WE= -github.com/sigstore/sigstore v1.6.4/go.mod h1:pjR64lBxnjoSrAr+Ydye/FV73IfrgtoYlAI11a8xMfA= +github.com/sigstore/sigstore v1.6.5 h1:/liHIo7YPJp6sN31DzBYDOuRPmN1xbzROMBE5DLllYM= +github.com/sigstore/sigstore v1.6.5/go.mod h1:h+EoQsf9+6UKgNYxKhBcPgo4PZeEVfzAJxKRRIYhyN4= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.6.5 h1:SXzp0/S2fBT3O45BKBxInJ5zLqPTqN/nJRG9ZU6ZS1o= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.6.5/go.mod h1:iJ1fhlOHajTppwDbFpSrrxvteZcbDt96yOkAQZ3AWdY= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.6.5 h1:Eq7FRpCmtWgqO0W8PTV4uEtoLfVOJfDuLYGEwCN1Cy8= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.6.5/go.mod h1:dmXaolTdykiQwqCb4WIGjEQWWz0V9NQ+OI2HeeWETRk= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.6.5 
h1:ayZpTdJReIyvS1LrXlTFo+cP7WUJalQAs3Bc2PWf6Zk= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.6.5/go.mod h1:xFrqobvdK11t7uvLovNwTcH8ht7/aqR+KIF93oTw+LQ= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.6.5 h1:mddEhD42KiPm3ZCW8gNa0bI6gcAYzTEPgqykweE6uGM= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.6.5/go.mod h1:uvKjzkw4TdbyAwQLxDdGD/Q2CKqiZGzgEb/0T3SZG5I= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1188,8 +1209,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= -golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1295,8 +1316,8 @@ golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1311,8 +1332,8 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1415,6 +1436,7 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md new file mode 100644 index 00000000000..7ecc8f2a967 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -0,0 +1,565 @@ +# Release History + +## 1.6.0 (2023-05-04) + +### Features Added +* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. +* Added `TenantID` field to `policy.TokenRequestOptions`. + +## 1.5.0 (2023-04-06) + +### Features Added +* Added `ShouldRetry` to `policy.RetryOptions` for finer-grained control over when to retry. + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.5.0-beta.1 +> These features will return in v1.6.0-beta.1. +* Removed `TokenRequestOptions.Claims` and `.TenantID` +* Removed ARM client support for CAE and cross-tenant auth. + +### Bugs Fixed +* Added non-conformant LRO terminal states `Cancelled` and `Completed`. + +### Other Changes +* Updated to latest `internal` module. + +## 1.5.0-beta.1 (2023-03-02) + +### Features Added +* This release includes the features added in v1.4.0-beta.1 + +## 1.4.0 (2023-03-02) +> This release doesn't include features added in v1.4.0-beta.1. They will return in v1.5.0-beta.1. + +### Features Added +* Add `Clone()` method for `arm/policy.ClientOptions`. + +### Bugs Fixed +* ARM's RP registration policy will no longer swallow unrecognized errors. +* Fixed an issue in `runtime.NewPollerFromResumeToken()` when resuming a `Poller` with a custom `PollingHandler`. +* Fixed wrong policy copy in `arm/runtime.NewPipeline()`. + +## 1.4.0-beta.1 (2023-02-02) + +### Features Added +* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. +* Added `Claims` and `TenantID` fields to `policy.TokenRequestOptions`. +* ARM bearer token policy handles CAE challenges. + +## 1.3.1 (2023-02-02) + +### Other Changes +* Update dependencies to latest versions. + +## 1.3.0 (2023-01-06) + +### Features Added +* Added `BearerTokenOptions.AuthorizationHandler` to enable extending `runtime.BearerTokenPolicy` + with custom authorization logic +* Added `Client` types and matching constructors to the `azcore` and `arm` packages. These represent a basic client for HTTP and ARM respectively. + +### Other Changes +* Updated `internal` module to latest version. 
+* `policy/Request.SetBody()` allows replacing a request's body with an empty one + +## 1.2.0 (2022-11-04) + +### Features Added +* Added `ClientOptions.APIVersion` field, which overrides the default version a client + requests of the service, if the client supports this (all ARM clients do). +* Added package `tracing` that contains the building blocks for distributed tracing. +* Added field `TracingProvider` to type `policy.ClientOptions` that will be used to set the per-client tracing implementation. + +### Bugs Fixed +* Fixed an issue in `runtime.SetMultipartFormData` to properly handle slices of `io.ReadSeekCloser`. +* Fixed the MaxRetryDelay default to be 60s. +* Failure to poll the state of an LRO will now return an `*azcore.ResponseError` for poller types that require this behavior. +* Fixed a bug in `runtime.NewPipeline` that would cause pipeline-specified allowed headers and query parameters to be lost. + +### Other Changes +* Retain contents of read-only fields when sending requests. + +## 1.1.4 (2022-10-06) + +### Bugs Fixed +* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`. +* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string + +### Other Changes +* Removed logging URL from retry policy as it's redundant. +* Retry policy logs when it exits due to a non-retriable status code. + +## 1.1.3 (2022-09-01) + +### Bugs Fixed +* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines. + +## 1.1.2 (2022-08-09) + +### Other Changes +* Fixed various doc bugs. + +## 1.1.1 (2022-06-30) + +### Bugs Fixed +* Avoid polling when a RELO LRO synchronously terminates. + +## 1.1.0 (2022-06-03) + +### Other Changes +* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests. + +## 1.0.0 (2022-05-12) + +### Features Added +* Added interface `runtime.PollingHandler` to support custom poller implementations. + * Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`. + +### Breaking Changes +* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost` +* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic` +* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions` +* Removed `TokenRequestOptions.TenantID` +* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration` +* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()` +* Removed `arm/runtime.FinalStateVia` and related `const` values +* Renamed `runtime.PageProcessor` to `runtime.PagingHandler` +* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported. +* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()` +* `TokenCredential.GetToken` now returns `AccessToken` by value. + +### Bugs Fixed +* When per-try timeouts are enabled, only cancel the context after the body has been read and closed. +* The `Operation-Location` poller now properly handles `final-state-via` values. +* Improvements in `runtime.Poller[T]` + * `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state. + * `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries. + +### Other Changes +* Updated to latest `internal` module and absorbed breaking changes. 
+ * Use `temporal.Resource` and deleted copy. +* The internal poller implementation has been refactored. + * The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification. + * The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface. + * The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it. +* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions` +* Default User-Agent headers no longer include `azcore` version information + +## 0.23.1 (2022-04-14) + +### Bugs Fixed +* Include XML header when marshalling XML content. +* Handle XML namespaces when searching for error code. +* Handle `odata.error` when searching for error code. + +## 0.23.0 (2022-04-04) + +### Features Added +* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations. +* Added `cloud` package with a new API for cloud configuration +* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type. + +### Breaking Changes +* Removed the `Poller` type-alias to the internal poller implementation. +* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations. +* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter. +* Replaced `arm.Endpoint` with `cloud` API + * Removed the `endpoint` parameter from `NewRPRegistrationPolicy()` + * `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error` +* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages. + * Removed the `pollerID` parameter as it's no longer required. + * Created optional parameter structs and moved optional parameters into them. +* Changed `FinalStateVia` field to a `const` type. + +### Other Changes +* Converted expiring resource and dependent types to use generics. + +## 0.22.0 (2022-03-03) + +### Features Added +* Added header `WWW-Authenticate` to the default allow-list of headers for logging. +* Added a pipeline policy that enables the retrieval of HTTP responses from API calls. + * Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default). + +### Breaking Changes +* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package. + +## 0.21.1 (2022-02-04) + +### Bugs Fixed +* Restore response body after reading in `Poller.FinalResponse()`. (#16911) +* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969) + +### Other Changes +* `BearerTokenPolicy` is more resilient to transient authentication failures. (#16789) + +## 0.21.0 (2022-01-11) + +### Features Added +* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger. +* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received. 
+ +### Breaking Changes +* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions` +* Renamed `arm/ClientOptions.Host` to `.Endpoint` +* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload` +* Removed `azcore.HTTPResponse` interface type +* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter +* `runtime.NewResponseError()` no longer requires an `error` parameter + +## 0.20.0 (2021-10-22) + +### Breaking Changes +* Removed `arm.Connection` +* Removed `azcore.Credential` and `.NewAnonymousCredential()` + * `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential` +* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication +* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions` +* Contents in the `log` package have been slightly renamed. +* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions` +* Changed parameters for `NewBearerTokenPolicy()` +* Moved policy config options out of `arm/runtime` and into `arm/policy` + +### Features Added +* Updating Documentation +* Added string typdef `arm.Endpoint` to provide a hint toward expected ARM client endpoints +* `azcore.ClientOptions` contains common pipeline configuration settings +* Added support for multi-tenant authorization in `arm/runtime` +* Require one second minimum when calling `PollUntilDone()` + +### Bug Fixes +* Fixed a potential panic when creating the default Transporter. +* Close LRO initial response body when creating a poller. +* Fixed a panic when recursively cloning structs that contain time.Time. + +## 0.19.0 (2021-08-25) + +### Breaking Changes +* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors). + * `azcore` has all core functionality. + * `log` contains facilities for configuring in-box logging. + * `policy` is used for configuring pipeline options and creating custom pipeline policies. + * `runtime` contains various helpers used by SDK authors and generated content. + * `streaming` has helpers for streaming IO operations. +* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed. + * As a result, the `Request.Telemetry()` method has been removed. +* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it. +* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it. +* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively. + +### Bug Fixes +* Fixed an issue in the retry policy where the request body could be overwritten after a rewind. + +### Other Changes +* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively. + * The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy. +* Poller logic has been consolidated across ARM and core implementations. + * This required some changes to the internal interfaces for core pollers. +* The core poller types have been improved, including more logging and test coverage. 
+ +## 0.18.1 (2021-08-20) + +### Features Added +* Adds an `ETag` type for comparing etags and handling etags on requests +* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object + +### Bugs Fixed +* `JoinPaths` will preserve query parameters encoded in the `root` url. + +### Other Changes +* Bumps dependency on `internal` module to the latest version (v0.7.0) + +## 0.18.0 (2021-07-29) +### Features Added +* Replaces methods from Logger type with two package methods for interacting with the logging functionality. +* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications` +* `azcore.SetListener` replaces `azcore.Logger().SetListener` + +### Breaking Changes +* Removes `Logger` type from `azcore` + + +## 0.17.0 (2021-07-27) +### Features Added +* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879) +* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123) + +### Breaking Changes +* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104) +* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103) +* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038) + + +## 0.16.2 (2021-05-26) +### Features Added +* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715) + + +## 0.16.1 (2021-05-19) +### Features Added +* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682) + + +## 0.16.0 (2021-05-07) +### Features Added +* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642) + + +## 0.15.1 (2021-05-06) +### Features Added +* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634) + + +## 0.15.0 (2021-05-05) +### Features Added +* Add support for null map and slice +* Export `Response.Payload` method + +### Breaking Changes +* remove `Response.UnmarshalError` as it's no longer required + + +## 0.14.5 (2021-04-23) +### Features Added +* Add `UnmarshalError()` on `azcore.Response` + + +## 0.14.4 (2021-04-22) +### Features Added +* Support for basic LRO polling +* Added type `LROPoller` and supporting types for basic polling on long running operations. +* rename poller param and added doc comment + +### Bugs Fixed +* Fixed content type detection bug in logging. + + +## 0.14.3 (2021-03-29) +### Features Added +* Add support for multi-part form data +* Added method `WriteMultipartFormData()` to Request. + + +## 0.14.2 (2021-03-17) +### Features Added +* Add support for encoding JSON null values +* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null. +* Documentation fixes + +### Bugs Fixed +* Fixed improper error wrapping + + +## 0.14.1 (2021-02-08) +### Features Added +* Add `Pager` and `Poller` interfaces to azcore + + +## 0.14.0 (2021-01-12) +### Features Added +* Accept zero-value options for default values +* Specify zero-value options structs to accept default values. +* Remove `DefaultXxxOptions()` methods. 
+* Do not silently change TryTimeout on negative values +* make per-try timeout opt-in + + +## 0.13.4 (2020-11-20) +### Features Added +* Include telemetry string in User Agent + + +## 0.13.3 (2020-11-20) +### Features Added +* Updating response body handling on `azcore.Response` + + +## 0.13.2 (2020-11-13) +### Features Added +* Remove implementation of stateless policies as first-class functions. + + +## 0.13.1 (2020-11-05) +### Features Added +* Add `Telemetry()` method to `azcore.Request()` + + +## 0.13.0 (2020-10-14) +### Features Added +* Rename `log` to `logger` to avoid name collision with the log package. +* Documentation improvements +* Simplified `DefaultHTTPClientTransport()` implementation + + +## 0.12.1 (2020-10-13) +### Features Added +* Update `internal` module dependence to `v0.5.0` + + +## 0.12.0 (2020-10-08) +### Features Added +* Removed storage specific content +* Removed internal content to prevent API clutter +* Refactored various policy options to conform with our options pattern + + +## 0.11.0 (2020-09-22) +### Features Added + +* Removed `LogError` and `LogSlowResponse`. +* Renamed `options` in `RequestLogOptions`. +* Updated `NewRequestLogPolicy()` to follow standard pattern for options. +* Refactored `requestLogPolicy.Do()` per above changes. +* Cleaned up/added logging in retry policy. +* Export `NewResponseError()` +* Fix `RequestLogOptions` comment + + +## 0.10.1 (2020-09-17) +### Features Added +* Add default console logger +* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'. +* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks. +* Add `LogLongRunningOperation` + + +## 0.10.0 (2020-09-10) +### Features Added +* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library. +* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation, it also requires a context parameter. +* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`. +* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request. +* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this. +* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type. +* moved path concatenation into `JoinPaths()` func + + +## 0.9.6 (2020-08-18) +### Features Added +* Improvements to body download policy +* Always download the response body for error responses, i.e. HTTP status codes >= 400. +* Simplify variable declarations + + +## 0.9.5 (2020-08-11) +### Features Added +* Set the Content-Length header in `Request.SetBody` + + +## 0.9.4 (2020-08-03) +### Features Added +* Fix cancellation of per try timeout +* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinant amount of time. +* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion. 
+* Do not drain response body if there are no more retries +* Do not retry non-idempotent operations when body download fails + + +## 0.9.3 (2020-07-28) +### Features Added +* Add support for custom HTTP request headers +* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request. +* Use `azcore.WithHTTPHeader` to add HTTP headers to a context. +* Remove method specific to Go 1.14 + + +## 0.9.2 (2020-07-28) +### Features Added +* Omit read-only content from request payloads +* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation. +* Verify no fields were dropped +* Handle embedded struct types +* Added test for cloning by value +* Add messages to failures + + +## 0.9.1 (2020-07-22) +### Features Added +* Updated dependency on internal module to fix race condition. + + +## 0.9.0 (2020-07-09) +### Features Added +* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure. +* Updated `sdk/internal` dependency to latest version. +* Rename package alias + + +## 0.8.2 (2020-06-29) +### Features Added +* Added missing documentation comments + +### Bugs Fixed +* Fixed a bug in body download policy. + + +## 0.8.1 (2020-06-26) +### Features Added +* Miscellaneous clean-up reported by linters + + +## 0.8.0 (2020-06-01) +### Features Added +* Differentiate between standard and URL encoding. + + +## 0.7.1 (2020-05-27) +### Features Added +* Add support for for base64 encoding and decoding of payloads. + + +## 0.7.0 (2020-05-12) +### Features Added +* Change `RetryAfter()` to a function. + + +## 0.6.0 (2020-04-29) +### Features Added +* Updating `RetryAfter` to only return the detaion in the RetryAfter header + + +## 0.5.0 (2020-03-23) +### Features Added +* Export `TransportFunc` + +### Breaking Changes +* Removed `IterationDone` + + +## 0.4.1 (2020-02-25) +### Features Added +* Ensure per-try timeout is properly cancelled +* Explicitly call cancel the per-try timeout when the response body has been read/closed by the body download policy. +* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed. +* `Logger.Should()` will return false if no listener is set. + + +## 0.4.0 (2020-02-18) +### Features Added +* Enable custom `RetryOptions` to be specified per API call +* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call. +* Remove 429 from the list of default HTTP status codes for retry. +* Change StatusCodesForRetry to a slice so consumers can append to it. +* Added support for retry-after in HTTP-date format. +* Cleaned up some comments specific to storage. +* Remove `Request.SetQueryParam()` +* Renamed `MaxTries` to `MaxRetries` + +## 0.3.0 (2020-01-16) +### Features Added +* Added `DefaultRetryOptions` to create initialized default options. 
+ +### Breaking Changes +* Removed `Response.CheckStatusCode()` + + +## 0.2.0 (2020-01-15) +### Features Added +* Add support for marshalling and unmarshalling JSON +* Removed `Response.Payload` field +* Exit early when unmarsahlling if there is no payload + + +## 0.1.0 (2020-01-10) +### Features Added +* Initial release diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt new file mode 100644 index 00000000000..48ea6616b5b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md new file mode 100644 index 00000000000..35a74e18d09 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md @@ -0,0 +1,39 @@ +# Azure Core Client Module for Go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azcore)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azcore%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main) +[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main) + +The `azcore` module provides a set of common interfaces and types for Go SDK client modules. +These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html). + +## Getting started + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency. +To add the latest version to your `go.mod` file, execute the following command. + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azcore +``` + +General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore). + +## Contributing +This project welcomes contributions and suggestions. 
Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml new file mode 100644 index 00000000000..aab9218538d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -0,0 +1,29 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go new file mode 100644 index 00000000000..9d077a3e126 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package cloud + +var ( + // AzureChina contains configuration for Azure China. + AzureChina = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzureGovernment contains configuration for Azure Government. + AzureGovernment = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzurePublic contains configuration for Azure Public Cloud. + AzurePublic = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{}, + } +) + +// ServiceName identifies a cloud service. +type ServiceName string + +// ResourceManager is a global constant identifying Azure Resource Manager. +const ResourceManager ServiceName = "resourceManager" + +// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager. +type ServiceConfiguration struct { + // Audience is the audience the client will request for its access tokens. + Audience string + // Endpoint is the service's base URL. + Endpoint string +} + +// Configuration configures a cloud. +type Configuration struct { + // ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory. + ActiveDirectoryAuthorityHost string + // Services contains configuration for the cloud's services. 
+ Services map[ServiceName]ServiceConfiguration +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go new file mode 100644 index 00000000000..985b1bde2f2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go @@ -0,0 +1,53 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/* +Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds. + +Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as +"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other +Azure Clouds to configure clients appropriately. + +This package contains predefined configuration for well-known sovereign clouds such as Azure Government and +Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. For +example, configuring a credential and ARM client for Azure Government: + + opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment} + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) + +Applications deployed to a private cloud such as Azure Stack create a Configuration object with +appropriate values: + + c := cloud.Configuration{ + ActiveDirectoryAuthorityHost: "https://...", + Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ + cloud.ResourceManager: { + Audience: "...", + Endpoint: "https://...", + }, + }, + } + opts := azcore.ClientOptions{Cloud: c} + + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) +*/ +package cloud diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go new file mode 100644 index 00000000000..72c2cf21eef --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -0,0 +1,113 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// AccessToken represents an Azure service bearer access token with expiry information. +type AccessToken = exported.AccessToken + +// TokenCredential represents a credential capable of providing an OAuth token. +type TokenCredential = exported.TokenCredential + +// holds sentinel values used to send nulls +var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} + +// NullValue is used to send an explicit 'null' within a request. +// This is typically used in JSON-MERGE-PATCH operations to delete a value. 
+func NullValue[T any]() T { + t := shared.TypeOfT[T]() + v, found := nullables[t] + if !found { + var o reflect.Value + if k := t.Kind(); k == reflect.Map { + o = reflect.MakeMap(t) + } else if k == reflect.Slice { + // empty slices appear to all point to the same data block + // which causes comparisons to become ambiguous. so we create + // a slice with len/cap of one which ensures a unique address. + o = reflect.MakeSlice(t, 1, 1) + } else { + o = reflect.New(t.Elem()) + } + v = o.Interface() + nullables[t] = v + } + // return the sentinel object + return v.(T) +} + +// IsNullValue returns true if the field contains a null sentinel value. +// This is used by custom marshallers to properly encode a null value. +func IsNullValue[T any](v T) bool { + // see if our map has a sentinel object for this *T + t := reflect.TypeOf(v) + if o, found := nullables[t]; found { + o1 := reflect.ValueOf(o) + v1 := reflect.ValueOf(v) + // we found it; return true if v points to the sentinel object. + // NOTE: maps and slices can only be compared to nil, else you get + // a runtime panic. so we compare addresses instead. + return o1.Pointer() == v1.Pointer() + } + // no sentinel object for this *t + return false +} + +// ClientOptions contains configuration settings for a client's pipeline. +type ClientOptions = policy.ClientOptions + +// Client is a basic HTTP client. It consists of a pipeline and tracing provider. +type Client struct { + pl runtime.Pipeline + tr tracing.Tracer +} + +// NewClient creates a new Client instance with the provided values. +// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans +// - moduleVersion - the semantic version of the containing module; used by the telemetry policy +// - plOpts - pipeline configuration options; can be the zero-value +// - options - optional client configurations; pass nil to accept the default values +func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { + pkg, err := shared.ExtractPackageName(clientName) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + + if !options.Telemetry.Disabled { + if err := shared.ValidateModVer(moduleVersion); err != nil { + return nil, err + } + } + + pl := runtime.NewPipeline(pkg, moduleVersion, plOpts, options) + + tr := options.TracingProvider.NewTracer(clientName, moduleVersion) + return &Client{pl: pl, tr: tr}, nil +} + +// Pipeline returns the pipeline for this client. +func (c *Client) Pipeline() runtime.Pipeline { + return c.pl +} + +// Tracer returns the tracer for this client. +func (c *Client) Tracer() tracing.Tracer { + return c.tr +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go new file mode 100644 index 00000000000..28c64678c76 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -0,0 +1,257 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +/* +Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients. + +The middleware consists of three components. + + - One or more Policy instances. + - A Transporter instance. + - A Pipeline instance that combines the Policy and Transporter instances. 
+ +# Implementing the Policy Interface + +A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as +a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share +the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to +avoid race conditions. + +A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can +perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, +and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request +work, it must call the Next() method on the *policy.Request instance in order to pass the request to the +next Policy in the chain. + +When an HTTP response comes back, the Policy then gets a chance to process the response/error. The Policy instance +can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response +body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response +and error instances to its caller. + +Template for implementing a stateless Policy: + + type policyFunc func(*policy.Request) (*http.Response, error) + + // Do implements the Policy interface on policyFunc. + func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { + return pf(req) + } + + func NewMyStatelessPolicy() policy.Policy { + return policyFunc(func(req *policy.Request) (*http.Response, error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err := req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + }) + } + +Template for implementing a stateful Policy: + + type MyStatefulPolicy struct { + // TODO: add configuration/setting fields here + } + + // TODO: add initialization args to NewMyStatefulPolicy() + func NewMyStatefulPolicy() policy.Policy { + return &MyStatefulPolicy{ + // TODO: initialize configuration/setting fields here + } + } + + func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err = req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + } + +# Implementing the Transporter Interface + +The Transporter interface is responsible for sending the HTTP request and returning the corresponding +HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter +implementation uses a shared http.Client from the standard library. + +The same stateful/stateless rules for Policy implementations apply to Transporter implementations. + +# Using Policy and Transporter Instances Via a Pipeline + +To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function. + + func NewPipeline(transport Transporter, policies ...Policy) Pipeline + +The specified Policy instances form a chain and are invoked in the order provided to NewPipeline +followed by the Transporter. + +Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method.
+ + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + + func (p Pipeline) Do(req *Request) (*http.Request, error) + +The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter +instances. The response/error is then sent through the same chain of Policy instances in reverse +order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with +TransportA. + + pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC) + +The flow of Request and Response looks like the following: + + policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+ + | + HTTP(S) endpoint + | + caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+ + +# Creating a Request Instance + +The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also +contains some internal state and provides various convenience methods. You create a Request instance +by calling the runtime.NewRequest function: + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + +If the Request should contain a body, call the SetBody method. + + func (req *Request) SetBody(body ReadSeekCloser, contentType string) error + +A seekable stream is required so that upon retry, the retry Policy instance can seek the stream +back to the beginning before retrying the network request and re-uploading the body. + +# Sending an Explicit Null + +Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted. + + { + "delete-me": null + } + +This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as +a means to resolve the ambiguity between a field to be excluded and its zero-value. + + type Widget struct { + Name *string `json:",omitempty"` + Count *int `json:",omitempty"` + } + +In the above example, Name and Count are defined as pointer-to-type to disambiguate between +a missing value (nil) and a zero-value (0) which might have semantic differences. + +In a PATCH operation, any fields left as nil are to have their values preserved. When updating +a Widget's count, one simply specifies the new value for Count, leaving Name nil. + +To fulfill the requirement for sending a JSON null, the NullValue() function can be used. + + w := Widget{ + Count: azcore.NullValue[*int](), + } + +This sends an explict "null" for Count, indicating that any current value for Count should be deleted. + +# Processing the Response + +When the HTTP response is received, the *http.Response is returned directly. Each Policy instance +can inspect/mutate the *http.Response. + +# Built-in Logging + +To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program. + +By default the logger writes to stderr. This can be customized by calling log.SetListener, providing +a callback that writes to the desired location. Any custom logging implementation MUST provide its +own synchronization to handle concurrent invocations. + +See the docs for the log package for further details. + +# Pageable Operations + +Pageable operations return potentially large data sets spread over multiple GET requests. The result of +each GET is a "page" of data consisting of a slice of items. + +Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T]. 
+ + func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse] + +The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages +and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked. + + pager := widgetClient.NewListWidgetsPager(nil) + for pager.More() { + page, err := pager.NextPage(context.TODO()) + // handle err + for _, widget := range page.Values { + // process widget + } + } + +# Long-Running Operations + +Long-running operations (LROs) are operations consisting of an initial request to start the operation followed +by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one +of the following values. + + - Succeeded - the LRO completed successfully + - Failed - the LRO failed to complete + - Canceled - the LRO was canceled + +LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T]. + + func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) + +When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started. +It does _not_ mean that the widget has been created or updated (or failed to be created/updated). + +The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete, +call the PollUntilDone() method. + + poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) + // handle err + result, err := poller.PollUntilDone(context.TODO(), nil) + // handle err + // use result + +The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the +context is canceled/timed out. + +Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to +this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation +mechanism as required. + +# Resume Tokens + +Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to +recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method. + + token, err := poller.ResumeToken() + // handle error + +Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls +to poller.Poll() might change the poller's state. In this case, a new token should be created. + +After the token has been obtained, it can be used to recreate an instance of the originating poller. + + poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ + ResumeToken: token, + }) + +When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken. + +Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO +BeginA() will result in an error. +*/ +package azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go new file mode 100644 index 00000000000..17bd50c6732 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go @@ -0,0 +1,14 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azcore + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. +type ResponseError = exported.ResponseError diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go new file mode 100644 index 00000000000..23ea7e7c8ea --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go @@ -0,0 +1,48 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "strings" +) + +// ETag is a property used for optimistic concurrency during updates +// ETag is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2 +// An ETag can be empty (""). +type ETag string + +// ETagAny is an ETag that represents everything, the value is "*" +const ETagAny ETag = "*" + +// Equals does a strong comparison of two ETags. Equals returns true when both +// ETags are not weak and the values of the underlying strings are equal. +func (e ETag) Equals(other ETag) bool { + return !e.IsWeak() && !other.IsWeak() && e == other +} + +// WeakEquals does a weak comparison of two ETags. Two ETags are equivalent if their opaque-tags match +// character-by-character, regardless of either or both being tagged as "weak". +func (e ETag) WeakEquals(other ETag) bool { + getStart := func(e1 ETag) int { + if e1.IsWeak() { + return 2 + } + return 0 + } + aStart := getStart(e) + bStart := getStart(other) + + aVal := e[aStart:] + bVal := other[bStart:] + + return aVal == bVal +} + +// IsWeak specifies whether the ETag is strong or weak. +func (e ETag) IsWeak() bool { + return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go new file mode 100644 index 00000000000..a1236b36252 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -0,0 +1,67 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "context" + "io" + "net/http" + "time" +) + +type nopCloser struct { + io.ReadSeeker +} + +func (n nopCloser) Close() error { + return nil +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +// Exported as streaming.NopCloser(). +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return nopCloser{rs} +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +// Exported as runtime.HasStatusCode(). +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +// AccessToken represents an Azure service bearer access token with expiry information. +// Exported as azcore.AccessToken. +type AccessToken struct { + Token string + ExpiresOn time.Time +} + +// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. 
+// Exported as policy.TokenRequestOptions. +type TokenRequestOptions struct { + // Scopes contains the list of permission scopes required for the token. + Scopes []string + + // TenantID identifies the tenant from which to request the token. azidentity credentials authenticate in + // their configured default tenants when this field isn't set. + TenantID string +} + +// TokenCredential represents a credential capable of providing an OAuth token. +// Exported as azcore.TokenCredential. +type TokenCredential interface { + // GetToken requests an access token for the specified set of scopes. + GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go new file mode 100644 index 00000000000..c44efd6eff5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go @@ -0,0 +1,97 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "fmt" + "net/http" + + "golang.org/x/net/http/httpguts" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +// Exported as policy.Policy. +type Policy interface { + // Do applies the policy to the specified Request. When implementing a Policy, mutate the + // request before calling req.Next() to move on to the next policy, and respond to the result + // before returning to the caller. + Do(req *Request) (*http.Response, error) +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +// Exported as runtime.Pipeline. +type Pipeline struct { + policies []Policy +} + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +// Exported as policy.Transporter. +type Transporter interface { + // Do sends the HTTP request and returns the HTTP response or error. + Do(req *http.Request) (*http.Response, error) +} + +// used to adapt a TransportPolicy to a Policy +type transportPolicy struct { + trans Transporter +} + +func (tp transportPolicy) Do(req *Request) (*http.Response, error) { + if tp.trans == nil { + return nil, errors.New("missing transporter") + } + resp, err := tp.trans.Do(req.Raw()) + if err != nil { + return nil, err + } else if resp == nil { + // there was no response and no error (rare but can happen) + // this ensures the retry policy will retry the request + return nil, errors.New("received nil response") + } + return resp, nil +} + +// NewPipeline creates a new Pipeline object from the specified Policies. +// Not directly exported, but used as part of runtime.NewPipeline(). +func NewPipeline(transport Transporter, policies ...Policy) Pipeline { + // transport policy must always be the last in the slice + policies = append(policies, transportPolicy{trans: transport}) + return Pipeline{ + policies: policies, + } +} + +// Do is called for each and every HTTP request. It passes the request through all +// the Policy objects (which can transform the Request's URL/query parameters/headers) +// and ultimately sends the transformed HTTP request over the network. 
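+// Before any policy runs, Do validates the request's header field names and values; an
+// invalid header closes the request body (if present) and returns an error without sending
+// the request. The pipeline's policies are then attached to the request and executed in
+// order via req.Next(), ending with the transport policy added by NewPipeline.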
+func (p Pipeline) Do(req *Request) (*http.Response, error) { + if req == nil { + return nil, errors.New("request cannot be nil") + } + // check copied from Transport.roundTrip() + for k, vv := range req.Raw().Header { + if !httpguts.ValidHeaderFieldName(k) { + if req.Raw().Body != nil { + req.Raw().Body.Close() + } + return nil, fmt.Errorf("invalid header field name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + if req.Raw().Body != nil { + req.Raw().Body.Close() + } + return nil, fmt.Errorf("invalid header field value %q for key %v", v, k) + } + } + } + req.policies = p.policies + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go new file mode 100644 index 00000000000..fa99d1b7ed1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -0,0 +1,182 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use NewRequest() instead. +// Exported as policy.Request. +type Request struct { + req *http.Request + body io.ReadSeekCloser + policies []Policy + values opValues +} + +type opValues map[reflect.Type]interface{} + +// Set adds/changes a value +func (ov opValues) set(value interface{}) { + ov[reflect.TypeOf(value)] = value +} + +// Get looks for a value set by SetValue first +func (ov opValues) get(value interface{}) bool { + v, ok := ov[reflect.ValueOf(value).Elem().Type()] + if ok { + reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v)) + } + return ok +} + +// NewRequest creates a new Request with the specified input. +// Exported as runtime.NewRequest(). +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) { + req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil) + if err != nil { + return nil, err + } + if req.URL.Host == "" { + return nil, errors.New("no Host in request URL") + } + if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") { + return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme) + } + return &Request{req: req}, nil +} + +// Body returns the original body specified when the Request was created. +func (req *Request) Body() io.ReadSeekCloser { + return req.body +} + +// Raw returns the underlying HTTP request. +func (req *Request) Raw() *http.Request { + return req.req +} + +// Next calls the next policy in the pipeline. +// If there are no more policies, nil and an error are returned. +// This method is intended to be called from pipeline policies. +// To send a request through a pipeline call Pipeline.Do(). +func (req *Request) Next() (*http.Response, error) { + if len(req.policies) == 0 { + return nil, errors.New("no more policies") + } + nextPolicy := req.policies[0] + nextReq := *req + nextReq.policies = nextReq.policies[1:] + return nextPolicy.Do(&nextReq) +} + +// SetOperationValue adds/changes a mutable key/value associated with a single operation. 
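+// Values are keyed by their dynamic type, so at most one value per type is retained and
+// storing another value of the same type replaces it. Use OperationValue() to retrieve it.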
+func (req *Request) SetOperationValue(value interface{}) { + if req.values == nil { + req.values = opValues{} + } + req.values.set(value) +} + +// OperationValue looks for a value set by SetOperationValue(). +func (req *Request) OperationValue(value interface{}) bool { + if req.values == nil { + return false + } + return req.values.get(value) +} + +// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length +// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "", +// Content-Type won't be set. +// Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser. +func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error { + var err error + var size int64 + if body != nil { + size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size + if err != nil { + return err + } + } + if size == 0 { + // treat an empty stream the same as a nil one: assign req a nil body + body = nil + // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content + // (Del is a no-op when the header has no value) + req.req.Header.Del(shared.HeaderContentLength) + } else { + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return err + } + req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) + req.Raw().GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream + return body, err + } + } + // keep a copy of the body argument. this is to handle cases + // where req.Body is replaced, e.g. httputil.DumpRequest and friends. + req.body = body + req.req.Body = body + req.req.ContentLength = size + if contentType == "" { + // Del is a no-op when the header has no value + req.req.Header.Del(shared.HeaderContentType) + } else { + req.req.Header.Set(shared.HeaderContentType, contentType) + } + return nil +} + +// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation. +func (req *Request) RewindBody() error { + if req.body != nil { + // Reset the stream back to the beginning and restore the body + _, err := req.body.Seek(0, io.SeekStart) + req.req.Body = req.body + return err + } + return nil +} + +// Close closes the request body. +func (req *Request) Close() error { + if req.body == nil { + return nil + } + return req.body.Close() +} + +// Clone returns a deep copy of the request with its context changed to ctx. +func (req *Request) Clone(ctx context.Context) *Request { + r2 := *req + r2.req = req.req.Clone(ctx) + return &r2 +} + +// not exported but dependent on Request + +// PolicyFunc is a type that implements the Policy interface. +// Use this type when implementing a stateless policy as a first-class function. +type PolicyFunc func(*Request) (*http.Response, error) + +// Do implements the Policy interface on policyFunc. +func (pf PolicyFunc) Do(req *Request) (*http.Response, error) { + return pf(req) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go new file mode 100644 index 00000000000..7df2f88c1c1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -0,0 +1,144 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +package exported + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "regexp" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// NewResponseError creates a new *ResponseError from the provided HTTP response. +// Exported as runtime.NewResponseError(). +func NewResponseError(resp *http.Response) error { + respErr := &ResponseError{ + StatusCode: resp.StatusCode, + RawResponse: resp, + } + + // prefer the error code in the response header + if ec := resp.Header.Get("x-ms-error-code"); ec != "" { + respErr.ErrorCode = ec + return respErr + } + + // if we didn't get x-ms-error-code, check in the response body + body, err := exported.Payload(resp, nil) + if err != nil { + return err + } + + if len(body) > 0 { + if code := extractErrorCodeJSON(body); code != "" { + respErr.ErrorCode = code + } else if code := extractErrorCodeXML(body); code != "" { + respErr.ErrorCode = code + } + } + + return respErr +} + +func extractErrorCodeJSON(body []byte) string { + var rawObj map[string]interface{} + if err := json.Unmarshal(body, &rawObj); err != nil { + // not a JSON object + return "" + } + + // check if this is a wrapped error, i.e. { "error": { ... } } + // if so then unwrap it + if wrapped, ok := rawObj["error"]; ok { + unwrapped, ok := wrapped.(map[string]interface{}) + if !ok { + return "" + } + rawObj = unwrapped + } else if wrapped, ok := rawObj["odata.error"]; ok { + // check if this a wrapped odata error, i.e. { "odata.error": { ... } } + unwrapped, ok := wrapped.(map[string]any) + if !ok { + return "" + } + rawObj = unwrapped + } + + // now check for the error code + code, ok := rawObj["code"] + if !ok { + return "" + } + codeStr, ok := code.(string) + if !ok { + return "" + } + return codeStr +} + +func extractErrorCodeXML(body []byte) string { + // regular expression is much easier than dealing with the XML parser + rx := regexp.MustCompile(`<(?:\w+:)?[c|C]ode>\s*(\w+)\s*<\/(?:\w+:)?[c|C]ode>`) + res := rx.FindStringSubmatch(string(body)) + if len(res) != 2 { + return "" + } + // first submatch is the entire thing, second one is the captured error code + return res[1] +} + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. +// Exported as azcore.ResponseError. +type ResponseError struct { + // ErrorCode is the error code returned by the resource provider if available. + ErrorCode string + + // StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants. + StatusCode int + + // RawResponse is the underlying HTTP response. + RawResponse *http.Response +} + +// Error implements the error interface for type ResponseError. +// Note that the message contents are not contractual and can change over time. 
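+// The message currently includes the request method and URL, the HTTP status code, the
+// error code (or "ERROR CODE UNAVAILABLE" when none was found), and the response body,
+// pretty-printed when it contains valid JSON.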
+func (e *ResponseError) Error() string { + // write the request method and URL with response status code + msg := &bytes.Buffer{} + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + if e.ErrorCode != "" { + fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode) + } else { + fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := exported.Payload(e.RawResponse, nil) + if err != nil { + // this really shouldn't fail at this point as the response + // body is already cached (it was read in NewResponseError) + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + // the standard library doesn't have a pretty-printer for XML + fmt.Fprintln(msg) + } else { + fmt.Fprintln(msg, "Response contained no body") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + + return msg.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go new file mode 100644 index 00000000000..0684cb31739 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// This is an internal helper package to combine the complete logging APIs. +package log + +import ( + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type Event = log.Event + +const ( + EventRequest = azlog.EventRequest + EventResponse = azlog.EventResponse + EventRetryPolicy = azlog.EventRetryPolicy + EventLRO = azlog.EventLRO +) + +func Write(cls log.Event, msg string) { + log.Write(cls, msg) +} + +func Writef(cls log.Event, format string, a ...interface{}) { + log.Writef(cls, format, a...) +} + +func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +func Should(cls log.Event) bool { + return log.Should(cls) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go new file mode 100644 index 00000000000..b05bd8b38d2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -0,0 +1,159 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package async + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md + +// Applicable returns true if the LRO is using Azure-AsyncOperation. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderAzureAsync) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["asyncURL"] + return ok +} + +// Poller is an LRO poller that uses the Azure-AsyncOperation pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The URL from Azure-AsyncOperation header. + AsyncURL string `json:"asyncURL"` + + // The URL from Location header. + LocURL string `json:"locURL"` + + // The URL from the initial LRO request. + OrigURL string `json:"origURL"` + + // The HTTP method from the initial LRO request. + Method string `json:"method"` + + // The value of final-state-via from swagger, can be the empty string. + FinalState pollers.FinalStateVia `json:"finalState"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response and final-state type. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.") + asyncURL := resp.Header.Get(shared.HeaderAzureAsync) + if asyncURL == "" { + return nil, errors.New("response is missing Azure-AsyncOperation header") + } + if !poller.IsValidURL(asyncURL) { + return nil, fmt.Errorf("invalid polling URL %s", asyncURL) + } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. + state, _ := poller.GetProvisioningState(resp) + if state == "" { + state = poller.StatusInProgress + } + p := &Poller[T]{ + pl: pl, + resp: resp, + AsyncURL: asyncURL, + LocURL: resp.Header.Get(shared.HeaderLocation), + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: state, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +// Poll retrieves the current state of the LRO. 
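+// Poll issues a GET to the Azure-AsyncOperation URL, reads the operation status from the
+// response body, updates the poller's state, and returns the raw HTTP response. A response
+// that fails status-code validation, or one without a status value, results in an error.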
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + state, err := poller.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + if p.resp.StatusCode == http.StatusNoContent { + return nil + } else if poller.Failed(p.CurState) { + return exported.NewResponseError(p.resp) + } + var req *exported.Request + var err error + if p.Method == http.MethodPatch || p.Method == http.MethodPut { + // for PATCH and PUT, the final GET is on the original resource URL + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost { + if p.FinalState == pollers.FinalStateViaAzureAsyncOp { + // no final GET required + } else if p.FinalState == pollers.FinalStateViaOriginalURI { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.LocURL != "" { + // ideally FinalState would be set to "location" but it isn't always. + // must check last due to more permissive condition. + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + } + if err != nil { + return err + } + + // if a final GET request has been created, execute it + if req != nil { + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go new file mode 100644 index 00000000000..2bb9e105b66 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -0,0 +1,135 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package body + +import ( + "context" + "errors" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Kind is the identifier of this type in a resume token. +const kind = "body" + +// Applicable returns true if the LRO is using no headers, just provisioning state. +// This is only applicable to PATCH and PUT methods and assumes no polling headers. +func Applicable(resp *http.Response) bool { + // we can't check for absense of headers due to some misbehaving services + // like redis that return a Location header but don't actually use that protocol + return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Body pattern. 
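+// The operation's state is derived from the provisioningState property in the response
+// body and, when that property is absent, from the HTTP status code.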
+type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The poller's type, used for resume token processing. + Type string `json:"type"` + + // The URL for polling. + PollURL string `json:"pollURL"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Body poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Body poller.") + p := &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: resp.Request.URL.String(), + } + // default initial state to InProgress. depending on the HTTP + // status code and provisioning state, we might change the value. + curState := poller.StatusInProgress + provState, err := poller.GetProvisioningState(resp) + if err != nil && !errors.Is(err, poller.ErrNoBody) { + return nil, err + } + if resp.StatusCode == http.StatusCreated && provState != "" { + // absense of provisioning state is ok for a 201, means the operation is in progress + curState = provState + } else if resp.StatusCode == http.StatusOK { + if provState != "" { + curState = provState + } else if provState == "" { + // for a 200, absense of provisioning state indicates success + curState = poller.StatusSucceeded + } + } else if resp.StatusCode == http.StatusNoContent { + curState = poller.StatusSucceeded + } + p.CurState = curState + return p, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + if resp.StatusCode == http.StatusNoContent { + p.resp = resp + p.CurState = poller.StatusSucceeded + return p.CurState, nil + } + state, err := poller.GetProvisioningState(resp) + if errors.Is(err, poller.ErrNoBody) { + // a missing response body in non-204 case is an error + return "", err + } else if state == "" { + // a response body without provisioning state is considered terminal success + state = poller.StatusSucceeded + } else if err != nil { + return "", err + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go new file mode 100644 index 00000000000..d6be89876ab --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -0,0 +1,119 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package loc + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Kind is the identifier of this type in a resume token. +const kind = "loc" + +// Applicable returns true if the LRO is using Location. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + Type string `json:"type"` + PollURL string `json:"pollURL"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Location poller.") + locURL := resp.Header.Get(shared.HeaderLocation) + if locURL == "" { + return nil, errors.New("response is missing Location header") + } + if !poller.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid polling URL %s", locURL) + } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. + state, _ := poller.GetProvisioningState(resp) + if state == "" { + state = poller.StatusInProgress + } + return &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: locURL, + CurState: state, + }, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + // location polling can return an updated polling URL + if h := resp.Header.Get(shared.HeaderLocation); h != "" { + p.PollURL = h + } + // if provisioning state is available, use that. this is only + // for some ARM LRO scenarios (e.g. DELETE with a Location header) + // so if it's missing then use HTTP status code. 
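+		// a 202 keeps the operation in progress; any other 2xx marks it succeeded and
+		// anything outside the 2xx range marks it failed.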
+ provState, _ := poller.GetProvisioningState(resp) + p.resp = resp + if provState != "" { + p.CurState = provState + } else if resp.StatusCode == http.StatusAccepted { + p.CurState = poller.StatusInProgress + } else if resp.StatusCode > 199 && resp.StatusCode < 300 { + // any 2xx other than a 202 indicates success + p.CurState = poller.StatusSucceeded + } else { + p.CurState = poller.StatusFailed + } + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go new file mode 100644 index 00000000000..1bc7ad0aced --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -0,0 +1,145 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package op + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Applicable returns true if the LRO is using Operation-Location. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderOperationLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["oplocURL"] + return ok +} + +// Poller is an LRO poller that uses the Operation-Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + OpLocURL string `json:"oplocURL"` + LocURL string `json:"locURL"` + OrigURL string `json:"origURL"` + Method string `json:"method"` + FinalState pollers.FinalStateVia `json:"finalState"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Operation-Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Operation-Location poller.") + opURL := resp.Header.Get(shared.HeaderOperationLocation) + if opURL == "" { + return nil, errors.New("response is missing Operation-Location header") + } + if !poller.IsValidURL(opURL) { + return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL) + } + locURL := resp.Header.Get(shared.HeaderLocation) + // Location header is optional + if locURL != "" && !poller.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid Location URL %s", locURL) + } + // default initial state to InProgress. if the + // service sent us a status then use that instead. 
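+	// (the status, if present, comes from the response body; a missing body is not an error here)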
+ curState := poller.StatusInProgress + status, err := poller.GetStatus(resp) + if err != nil && !errors.Is(err, poller.ErrNoBody) { + return nil, err + } + if status != "" { + curState = status + } + + return &Poller[T]{ + pl: pl, + resp: resp, + OpLocURL: opURL, + LocURL: locURL, + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: curState, + }, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + state, err := poller.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + var req *exported.Request + var err error + if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost { + // no final GET required, terminal response should have it + } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { + return rlErr + } else if rl != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, rl) + } else if p.Method == http.MethodPatch || p.Method == http.MethodPut { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + if err != nil { + return err + } + + // if a final GET request has been created, execute it + if req != nil { + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go new file mode 100644 index 00000000000..37ed647f4e0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go @@ -0,0 +1,24 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia string + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation" + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation FinalStateVia = "location" + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI FinalStateVia = "original-uri" + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. 
+ FinalStateViaOpLocation FinalStateVia = "operation-location" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go new file mode 100644 index 00000000000..d8d86a46c2d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -0,0 +1,187 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + + azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// getTokenTypeName creates a type name from the type parameter T. +func getTokenTypeName[T any]() (string, error) { + tt := shared.TypeOfT[T]() + var n string + if tt.Kind() == reflect.Pointer { + n = "*" + tt = tt.Elem() + } + n += tt.Name() + if n == "" { + return "", errors.New("nameless types are not allowed") + } + return n, nil +} + +type resumeTokenWrapper[T any] struct { + Type string `json:"type"` + Token T `json:"token"` +} + +// NewResumeToken creates a resume token from the specified type. +// An error is returned if the generic type has no name (e.g. struct{}). +func NewResumeToken[TResult, TSource any](from TSource) (string, error) { + n, err := getTokenTypeName[TResult]() + if err != nil { + return "", err + } + b, err := json.Marshal(resumeTokenWrapper[TSource]{ + Type: n, + Token: from, + }) + if err != nil { + return "", err + } + return string(b), nil +} + +// ExtractToken returns the poller-specific token information from the provided token value. +func ExtractToken(token string) ([]byte, error) { + raw := map[string]json.RawMessage{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return nil, err + } + // this is dependent on the type resumeTokenWrapper[T] + tk, ok := raw["token"] + if !ok { + return nil, errors.New("missing token value") + } + return tk, nil +} + +// IsTokenValid returns an error if the specified token isn't applicable for generic type T. +func IsTokenValid[T any](token string) error { + raw := map[string]interface{}{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return err + } + t, ok := raw["type"] + if !ok { + return errors.New("missing type value") + } + tt, ok := t.(string) + if !ok { + return fmt.Errorf("invalid type format %T", t) + } + n, err := getTokenTypeName[T]() + if err != nil { + return err + } + if tt != n { + return fmt.Errorf("cannot resume from this poller token. token is for type %s, not %s", tt, n) + } + return nil +} + +// used if the operation synchronously completed +type NopPoller[T any] struct { + resp *http.Response + result T +} + +// NewNopPoller creates a NopPoller from the provided response. +// It unmarshals the response body into an instance of T. 
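+// A 204 (no content) response or an empty payload leaves the result as T's zero value.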
+func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) { + np := &NopPoller[T]{resp: resp} + if resp.StatusCode == http.StatusNoContent { + return np, nil + } + payload, err := exported.Payload(resp, nil) + if err != nil { + return nil, err + } + if len(payload) == 0 { + return np, nil + } + if err = json.Unmarshal(payload, &np.result); err != nil { + return nil, err + } + return np, nil +} + +func (*NopPoller[T]) Done() bool { + return true +} + +func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) { + return p.resp, nil +} + +func (p *NopPoller[T]) Result(ctx context.Context, out *T) error { + *out = p.result + return nil +} + +// PollHelper creates and executes the request, calling update() with the response. +// If the request fails, the update func is not called. +// The update func returns the state of the operation for logging purposes or an error +// if it fails to extract the required state from the response. +func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, update func(resp *http.Response) (string, error)) error { + req, err := azexported.NewRequest(ctx, http.MethodGet, endpoint) + if err != nil { + return err + } + resp, err := pl.Do(req) + if err != nil { + return err + } + state, err := update(resp) + if err != nil { + return err + } + log.Writef(log.EventLRO, "State %s", state) + return nil +} + +// ResultHelper processes the response as success or failure. +// In the success case, it unmarshals the payload into either a new instance of T or out. +// In the failure case, it creates an *azcore.Response error from the response. +func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { + // short-circuit the simple success case with no response body to unmarshal + if resp.StatusCode == http.StatusNoContent { + return nil + } + + defer resp.Body.Close() + if !poller.StatusCodeValid(resp) || failed { + // the LRO failed. unmarshall the error and update state + return azexported.NewResponseError(resp) + } + + // success case + payload, err := exported.Payload(resp, nil) + if err != nil { + return err + } + if len(payload) == 0 { + return nil + } + + if err = json.Unmarshal(payload, out); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go new file mode 100644 index 00000000000..681167bcba5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -0,0 +1,36 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +const ( + ContentTypeAppJSON = "application/json" + ContentTypeAppXML = "application/xml" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderAuxiliaryAuthorization = "x-ms-authorization-auxiliary" + HeaderAzureAsync = "Azure-AsyncOperation" + HeaderContentLength = "Content-Length" + HeaderContentType = "Content-Type" + HeaderLocation = "Location" + HeaderOperationLocation = "Operation-Location" + HeaderRetryAfter = "Retry-After" + HeaderUserAgent = "User-Agent" + HeaderWWWAuthenticate = "WWW-Authenticate" + HeaderXMSClientRequestID = "x-ms-client-request-id" +) + +const BearerTokenPrefix = "Bearer " + +const ( + // Module is the name of the calling module used in telemetry data. 
+ Module = "azcore" + + // Version is the semantic version (see http://semver.org) of this module. + Version = "v1.6.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go new file mode 100644 index 00000000000..930ab8c8399 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -0,0 +1,92 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +import ( + "context" + "fmt" + "net/http" + "reflect" + "regexp" + "strconv" + "strings" + "time" +) + +// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. +type CtxWithHTTPHeaderKey struct{} + +// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. +type CtxWithRetryOptionsKey struct{} + +// CtxIncludeResponseKey is used as a context key for retrieving the raw response. +type CtxIncludeResponseKey struct{} + +// Delay waits for the duration to elapse or the context to be cancelled. +func Delay(ctx context.Context, delay time.Duration) error { + select { + case <-time.After(delay): + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// RetryAfter returns non-zero if the response contains a Retry-After header value. +func RetryAfter(resp *http.Response) time.Duration { + if resp == nil { + return 0 + } + ra := resp.Header.Get(HeaderRetryAfter) + if ra == "" { + return 0 + } + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { + return time.Duration(retryAfter) * time.Second + } else if t, err := time.Parse(time.RFC1123, ra); err == nil { + return time.Until(t) + } + return 0 +} + +// TypeOfT returns the type of the generic type param. +func TypeOfT[T any]() reflect.Type { + // you can't, at present, obtain the type of + // a type parameter, so this is the trick + return reflect.TypeOf((*T)(nil)).Elem() +} + +// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface. +type TransportFunc func(*http.Request) (*http.Response, error) + +// Do implements the Transporter interface for the TransportFunc type. +func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) { + return pf(req) +} + +// ValidateModVer verifies that moduleVersion is a valid semver 2.0 string. +func ValidateModVer(moduleVersion string) error { + modVerRegx := regexp.MustCompile(`^v\d+\.\d+\.\d+(?:-[a-zA-Z0-9_.-]+)?$`) + if !modVerRegx.MatchString(moduleVersion) { + return fmt.Errorf("malformed moduleVersion param value %s", moduleVersion) + } + return nil +} + +// ExtractPackageName returns "package" from "package.Client". +// If clientName is malformed, an error is returned. +func ExtractPackageName(clientName string) (string, error) { + pkg, client, ok := strings.Cut(clientName, ".") + if !ok { + return "", fmt.Errorf("missing . 
in clientName %s", clientName) + } else if pkg == "" || client == "" { + return "", fmt.Errorf("malformed clientName %s", clientName) + } + return pkg, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go new file mode 100644 index 00000000000..2f3901bff3c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package log contains functionality for configuring logging behavior. +// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all". +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go new file mode 100644 index 00000000000..7bde29d0a46 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go @@ -0,0 +1,50 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package log provides functionality for configuring logging facilities. +package log + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// Event is used to group entries. Each group can be toggled on or off. +type Event = log.Event + +const ( + // EventRequest entries contain information about HTTP requests. + // This includes information like the URL, query parameters, and headers. + EventRequest Event = "Request" + + // EventResponse entries contain information about HTTP responses. + // This includes information like the HTTP status code, headers, and request URL. + EventResponse Event = "Response" + + // EventRetryPolicy entries contain information specific to the retry policy in use. + EventRetryPolicy Event = "Retry" + + // EventLRO entries contain information specific to long-running operations. + // This includes information like polling location, operation state, and sleep intervals. + EventLRO Event = "LongRunningOperation" +) + +// SetEvents is used to control which events are written to +// the log. By default all log events are writen. +// NOTE: this is not goroutine safe and should be called before using SDK clients. +func SetEvents(cls ...Event) { + log.SetEvents(cls...) +} + +// SetListener will set the Logger to write to the specified Listener. +// NOTE: this is not goroutine safe and should be called before using SDK clients. +func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +// for testing purposes +func resetEvents() { + log.TestResetEvents() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go new file mode 100644 index 00000000000..fad2579ed6c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package policy contains the definitions needed for configuring in-box pipeline policies +// and creating custom policies. 
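+// The options types defined here (e.g. ClientOptions, RetryOptions, LogOptions) configure
+// the corresponding policies that the runtime package assembles into a pipeline.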
+package policy diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go new file mode 100644 index 00000000000..b200047834c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -0,0 +1,164 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package policy + +import ( + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +type Policy = exported.Policy + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +type Transporter = exported.Transporter + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use runtime.NewRequest() instead. +type Request = exported.Request + +// ClientOptions contains optional settings for a client's pipeline. +// All zero-value fields will be initialized with default values. +type ClientOptions struct { + // APIVersion overrides the default version requested of the service. Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion string + + // Cloud specifies a cloud for the client. The default is Azure Public Cloud. + Cloud cloud.Configuration + + // Logging configures the built-in logging policy. + Logging LogOptions + + // Retry configures the built-in retry policy. + Retry RetryOptions + + // Telemetry configures the built-in telemetry policy. + Telemetry TelemetryOptions + + // TracingProvider configures the tracing provider. + // It defaults to a no-op tracer. + TracingProvider tracing.Provider + + // Transport sets the transport for HTTP requests. + Transport Transporter + + // PerCallPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCallPolicies []Policy + + // PerRetryPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetryPolicies []Policy +} + +// LogOptions configures the logging policy's behavior. +type LogOptions struct { + // IncludeBody indicates if request and response bodies should be included in logging. + // The default value is false. + // NOTE: enabling this can lead to disclosure of sensitive information, use with care. + IncludeBody bool + + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParams is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParams []string +} + +// RetryOptions configures the retry policy's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. 
+type RetryOptions struct { + // MaxRetries specifies the maximum number of attempts a failed operation will be retried + // before producing an error. + // The default value is three. A value less than zero means one try and no retries. + MaxRetries int32 + + // TryTimeout indicates the maximum time allowed for any single try of an HTTP request. + // This is disabled by default. Specify a value greater than zero to enable. + // NOTE: Setting this to a small value might cause premature HTTP request time-outs. + TryTimeout time.Duration + + // RetryDelay specifies the initial amount of delay to use before retrying an operation. + // The value is used only if the HTTP response does not contain a Retry-After header. + // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. + // The default value is four seconds. A value less than zero means no delay between retries. + RetryDelay time.Duration + + // MaxRetryDelay specifies the maximum delay allowed before retrying an operation. + // Typically the value is greater than or equal to the value specified in RetryDelay. + // The default Value is 60 seconds. A value less than zero means there is no cap. + MaxRetryDelay time.Duration + + // StatusCodes specifies the HTTP status codes that indicate the operation should be retried. + // A nil slice will use the following values. + // http.StatusRequestTimeout 408 + // http.StatusTooManyRequests 429 + // http.StatusInternalServerError 500 + // http.StatusBadGateway 502 + // http.StatusServiceUnavailable 503 + // http.StatusGatewayTimeout 504 + // Specifying values will replace the default values. + // Specifying an empty slice will disable retries for HTTP status codes. + StatusCodes []int + + // ShouldRetry evaluates if the retry policy should retry the request. + // When specified, the function overrides comparison against the list of + // HTTP status codes and error checking within the retry policy. Context + // and NonRetriable errors remain evaluated before calling ShouldRetry. + // The *http.Response and error parameters are mutually exclusive, i.e. + // if one is nil, the other is not nil. + // A return value of true means the retry policy should retry. + ShouldRetry func(*http.Response, error) bool +} + +// TelemetryOptions configures the telemetry policy's behavior. +type TelemetryOptions struct { + // ApplicationID is an application-specific identification string to add to the User-Agent. + // It has a maximum length of 24 characters and must not contain any spaces. + ApplicationID string + + // Disabled will prevent the addition of any telemetry data to the User-Agent. + Disabled bool +} + +// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. +type TokenRequestOptions = exported.TokenRequestOptions + +// BearerTokenOptions configures the bearer token policy's behavior. +type BearerTokenOptions struct { + // AuthorizationHandler allows SDK developers to run client-specific logic when BearerTokenPolicy must authorize a request. + // When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from + // its given credential. + AuthorizationHandler AuthorizationHandler +} + +// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request. +type AuthorizationHandler struct { + // OnRequest is called each time the policy receives a request. 
Its func parameter authorizes the request with a token + // from the policy's given credential. Implementations that need to perform I/O should use the Request's context, + // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't + // send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a + // token from its credential according to its configuration. + OnRequest func(*Request, func(TokenRequestOptions) error) error + + // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the + // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible + // for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's + // given credential. Implementations that need to perform I/O should use the Request's context, available from + // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil, + // the policy will return any 401 response to the client. + OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go new file mode 100644 index 00000000000..c9cfa438cb3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package runtime contains various facilities for creating requests and handling responses. +// The content is intended for SDK authors. +package runtime diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go new file mode 100644 index 00000000000..6d03b291ebf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go @@ -0,0 +1,19 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +// NewResponseError creates an *azcore.ResponseError from the provided HTTP response. +// Call this when a service request returns a non-successful status code. +func NewResponseError(resp *http.Response) error { + return exported.NewResponseError(resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go new file mode 100644 index 00000000000..5507665d651 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -0,0 +1,77 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" +) + +// PagingHandler contains the required data for constructing a Pager. +type PagingHandler[T any] struct { + // More returns a boolean indicating if there are more pages to fetch. + // It uses the provided page to make the determination. 
+ More func(T) bool + + // Fetcher fetches the first and subsequent pages. + Fetcher func(context.Context, *T) (T, error) +} + +// Pager provides operations for iterating over paged responses. +type Pager[T any] struct { + current *T + handler PagingHandler[T] + firstPage bool +} + +// NewPager creates an instance of Pager using the specified PagingHandler. +// Pass a non-nil T for firstPage if the first page has already been retrieved. +func NewPager[T any](handler PagingHandler[T]) *Pager[T] { + return &Pager[T]{ + handler: handler, + firstPage: true, + } +} + +// More returns true if there are more pages to retrieve. +func (p *Pager[T]) More() bool { + if p.current != nil { + return p.handler.More(*p.current) + } + return true +} + +// NextPage advances the pager to the next page. +func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { + var resp T + var err error + if p.current != nil { + if p.firstPage { + // we get here if it's an LRO-pager, we already have the first page + p.firstPage = false + return *p.current, nil + } else if !p.handler.More(*p.current) { + return *new(T), errors.New("no more pages") + } + resp, err = p.handler.Fetcher(ctx, p.current) + } else { + // non-LRO case, first page + p.firstPage = false + resp, err = p.handler.Fetcher(ctx, nil) + } + if err != nil { + return *new(T), err + } + p.current = &resp + return *p.current, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T]. +func (p *Pager[T]) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &p.current) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go new file mode 100644 index 00000000000..9d9288f53d3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -0,0 +1,66 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// PipelineOptions contains Pipeline options for SDK developers +type PipelineOptions struct { + AllowedHeaders, AllowedQueryParameters []string + APIVersion APIVersionOptions + PerCall, PerRetry []policy.Policy +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +type Pipeline = exported.Pipeline + +// NewPipeline creates a pipeline from connection options, with any additional policies as specified. +// Policies from ClientOptions are placed after policies from PipelineOptions. +// The module and version parameters are used by the telemetry policy, when enabled. +func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline { + cp := policy.ClientOptions{} + if options != nil { + cp = *options + } + if len(plOpts.AllowedHeaders) > 0 { + headers := make([]string, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) + copy(headers, plOpts.AllowedHeaders) + headers = append(headers, cp.Logging.AllowedHeaders...) + cp.Logging.AllowedHeaders = headers + } + if len(plOpts.AllowedQueryParameters) > 0 { + qp := make([]string, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) + copy(qp, plOpts.AllowedQueryParameters) + qp = append(qp, cp.Logging.AllowedQueryParams...) 
+ cp.Logging.AllowedQueryParams = qp + } + // we put the includeResponsePolicy at the very beginning so that the raw response + // is populated with the final response (some policies might mutate the response) + policies := []policy.Policy{exported.PolicyFunc(includeResponsePolicy)} + if cp.APIVersion != "" { + policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion)) + } + if !cp.Telemetry.Disabled { + policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry)) + } + policies = append(policies, plOpts.PerCall...) + policies = append(policies, cp.PerCallPolicies...) + policies = append(policies, NewRetryPolicy(&cp.Retry)) + policies = append(policies, plOpts.PerRetry...) + policies = append(policies, cp.PerRetryPolicies...) + policies = append(policies, NewLogPolicy(&cp.Logging)) + policies = append(policies, exported.PolicyFunc(httpHeaderPolicy), exported.PolicyFunc(bodyDownloadPolicy)) + transport := cp.Transport + if transport == nil { + transport = defaultHTTPClient + } + return exported.NewPipeline(transport, policies...) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go new file mode 100644 index 00000000000..e5309aa6c15 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// APIVersionOptions contains options for API versions +type APIVersionOptions struct { + // Location indicates where to set the version on a request, for example in a header or query param + Location APIVersionLocation + // Name is the name of the header or query parameter, for example "api-version" + Name string +} + +// APIVersionLocation indicates which part of a request identifies the service version +type APIVersionLocation int + +const ( + // APIVersionLocationQueryParam indicates a query parameter + APIVersionLocationQueryParam = 0 + // APIVersionLocationHeader indicates a header + APIVersionLocationHeader = 1 +) + +// newAPIVersionPolicy constructs an APIVersionPolicy. If version is "", Do will be a no-op. If version +// isn't empty and opts.Name is empty, Do will return an error. +func newAPIVersionPolicy(version string, opts *APIVersionOptions) *apiVersionPolicy { + if opts == nil { + opts = &APIVersionOptions{} + } + return &apiVersionPolicy{location: opts.Location, name: opts.Name, version: version} +} + +// apiVersionPolicy enables users to set the API version of every request a client sends. +type apiVersionPolicy struct { + // location indicates whether "name" refers to a query parameter or header. + location APIVersionLocation + + // name of the query param or header whose value should be overridden; provided by the client. + name string + + // version is the value (provided by the user) that replaces the default version value. + version string +} + +// Do sets the request's API version, if the policy is configured to do so, replacing any prior value. 
+func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) { + if a.version != "" { + if a.name == "" { + // user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions + return nil, errors.New("this client doesn't support overriding its API version") + } + switch a.location { + case APIVersionLocationHeader: + req.Raw().Header.Set(a.name, a.version) + case APIVersionLocationQueryParam: + q := req.Raw().URL.Query() + q.Set(a.name, a.version) + req.Raw().URL.RawQuery = q.Encode() + default: + return nil, fmt.Errorf("unknown APIVersionLocation %d", a.location) + } + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go new file mode 100644 index 00000000000..b61e4c121f6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -0,0 +1,116 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +type BearerTokenPolicy struct { + // mainResource is the resource to be retreived using the tenant specified in the credential + mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState] + // the following fields are read-only + authzHandler policy.AuthorizationHandler + cred exported.TokenCredential + scopes []string +} + +type acquiringResourceState struct { + req *policy.Request + p *BearerTokenPolicy + tro policy.TokenRequestOptions +} + +// acquire acquires or updates the resource; only one +// thread/goroutine at a time ever calls this function +func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken(state.req.Raw().Context(), state.tro) + if err != nil { + return exported.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} + +// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. +// cred: an azcore.TokenCredential implementation such as a credential object from azidentity +// scopes: the list of permission scopes required for the token. +// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options. 
+func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy { + if opts == nil { + opts = &policy.BearerTokenOptions{} + } + return &BearerTokenPolicy{ + authzHandler: opts.AuthorizationHandler, + cred: cred, + scopes: scopes, + mainResource: temporal.NewResource(acquire), + } +} + +// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential +func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error { + return func(tro policy.TokenRequestOptions) error { + as := acquiringResourceState{p: b, req: req, tro: tro} + tk, err := b.mainResource.Get(as) + if err != nil { + return err + } + req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token) + return nil + } +} + +// Do authorizes a request with a bearer token +func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { + var err error + if b.authzHandler.OnRequest != nil { + err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) + } else { + err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes}) + } + if err != nil { + return nil, ensureNonRetriable(err) + } + + res, err := req.Next() + if err != nil { + return nil, err + } + + if res.StatusCode == http.StatusUnauthorized { + b.mainResource.Expire() + if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil { + if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { + res, err = req.Next() + } + } + } + return res, ensureNonRetriable(err) +} + +func ensureNonRetriable(err error) error { + var nre errorinfo.NonRetriable + if err != nil && !errors.As(err, &nre) { + err = btpError{err} + } + return err +} + +// btpError is a wrapper that ensures RetryPolicy doesn't retry requests BearerTokenPolicy couldn't authorize +type btpError struct { + error +} + +func (btpError) NonRetriable() {} + +var _ errorinfo.NonRetriable = (*btpError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go new file mode 100644 index 00000000000..99dc029f0c1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go @@ -0,0 +1,72 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +// bodyDownloadPolicy creates a policy object that downloads the response's body to a []byte. 
+func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if err != nil { + return resp, err + } + var opValues bodyDownloadPolicyOpValues + // don't skip downloading error response bodies + if req.OperationValue(&opValues); opValues.Skip && resp.StatusCode < 400 { + return resp, err + } + // Either bodyDownloadPolicyOpValues was not specified (so skip is false) + // or it was specified and skip is false: don't skip downloading the body + _, err = Payload(resp) + if err != nil { + return resp, newBodyDownloadError(err, req) + } + return resp, err +} + +// bodyDownloadPolicyOpValues is the struct containing the per-operation values +type bodyDownloadPolicyOpValues struct { + Skip bool +} + +type bodyDownloadError struct { + err error +} + +func newBodyDownloadError(err error, req *policy.Request) error { + // on failure, only retry the request for idempotent operations. + // we currently identify them as DELETE, GET, and PUT requests. + if m := strings.ToUpper(req.Raw().Method); m == http.MethodDelete || m == http.MethodGet || m == http.MethodPut { + // error is safe for retry + return err + } + // wrap error to avoid retries + return &bodyDownloadError{ + err: err, + } +} + +func (b *bodyDownloadError) Error() string { + return fmt.Sprintf("body download policy: %s", b.err.Error()) +} + +func (b *bodyDownloadError) NonRetriable() { + // marker method +} + +func (b *bodyDownloadError) Unwrap() error { + return b.err +} + +var _ errorinfo.NonRetriable = (*bodyDownloadError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go new file mode 100644 index 00000000000..770e0a2b6a6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// newHTTPHeaderPolicy creates a policy object that adds custom HTTP headers to a request +func httpHeaderPolicy(req *policy.Request) (*http.Response, error) { + // check if any custom HTTP headers have been specified + if header := req.Raw().Context().Value(shared.CtxWithHTTPHeaderKey{}); header != nil { + for k, v := range header.(http.Header) { + // use Set to replace any existing value + // it also canonicalizes the header key + req.Raw().Header.Set(k, v[0]) + // add any remaining values + for i := 1; i < len(v); i++ { + req.Raw().Header.Add(k, v[i]) + } + } + } + return req.Next() +} + +// WithHTTPHeader adds the specified http.Header to the parent context. +// Use this to specify custom HTTP headers at the API-call level. +// Any overlapping headers will have their values replaced with the values specified here. 
+func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go new file mode 100644 index 00000000000..4714baa30cd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go @@ -0,0 +1,34 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// includeResponsePolicy creates a policy that retrieves the raw HTTP response upon request +func includeResponsePolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if resp == nil { + return resp, err + } + if httpOutRaw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); httpOutRaw != nil { + httpOut := httpOutRaw.(**http.Response) + *httpOut = resp + } + return resp, err +} + +// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. +// The resp parameter will contain the HTTP response after the request has completed. +func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { + return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go new file mode 100644 index 00000000000..8514f57d5c2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go @@ -0,0 +1,263 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/diag" +) + +type logPolicy struct { + includeBody bool + allowedHeaders map[string]struct{} + allowedQP map[string]struct{} +} + +// NewLogPolicy creates a request/response logging policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. 
+func NewLogPolicy(o *policy.LogOptions) policy.Policy { + if o == nil { + o = &policy.LogOptions{} + } + // construct default hash set of allowed headers + allowedHeaders := map[string]struct{}{ + "accept": {}, + "cache-control": {}, + "connection": {}, + "content-length": {}, + "content-type": {}, + "date": {}, + "etag": {}, + "expires": {}, + "if-match": {}, + "if-modified-since": {}, + "if-none-match": {}, + "if-unmodified-since": {}, + "last-modified": {}, + "ms-cv": {}, + "pragma": {}, + "request-id": {}, + "retry-after": {}, + "server": {}, + "traceparent": {}, + "transfer-encoding": {}, + "user-agent": {}, + "www-authenticate": {}, + "x-ms-request-id": {}, + "x-ms-client-request-id": {}, + "x-ms-return-client-request-id": {}, + } + // add any caller-specified allowed headers to the set + for _, ah := range o.AllowedHeaders { + allowedHeaders[strings.ToLower(ah)] = struct{}{} + } + // now do the same thing for query params + allowedQP := getAllowedQueryParams(o.AllowedQueryParams) + return &logPolicy{ + includeBody: o.IncludeBody, + allowedHeaders: allowedHeaders, + allowedQP: allowedQP, + } +} + +// getAllowedQueryParams merges the default set of allowed query parameters +// with a custom set (usually comes from client options). +func getAllowedQueryParams(customAllowedQP []string) map[string]struct{} { + allowedQP := map[string]struct{}{ + "api-version": {}, + } + for _, qp := range customAllowedQP { + allowedQP[strings.ToLower(qp)] = struct{}{} + } + return allowedQP +} + +// logPolicyOpValues is the struct containing the per-operation values +type logPolicyOpValues struct { + try int32 + start time.Time +} + +func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) { + // Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object. + var opValues logPolicyOpValues + if req.OperationValue(&opValues); opValues.start.IsZero() { + opValues.start = time.Now() // If this is the 1st try, record this operation's start time + } + opValues.try++ // The first try is #1 (not #0) + req.SetOperationValue(opValues) + + // Log the outgoing request as informational + if log.Should(log.EventRequest) { + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try) + p.writeRequestWithResponse(b, req, nil, nil) + var err error + if p.includeBody { + err = writeReqBody(req, b) + } + log.Write(log.EventRequest, b.String()) + if err != nil { + return nil, err + } + } + + // Set the time for this particular retry operation and then Do the operation. 
+ tryStart := time.Now() + response, err := req.Next() // Make the request + tryEnd := time.Now() + tryDuration := tryEnd.Sub(tryStart) + opDuration := tryEnd.Sub(opValues.start) + + if log.Should(log.EventResponse) { + // We're going to log this; build the string to log + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v, OpTime=%v) -- ", opValues.try, tryDuration, opDuration) + if err != nil { // This HTTP request did not get a response from the service + fmt.Fprint(b, "REQUEST ERROR\n") + } else { + fmt.Fprint(b, "RESPONSE RECEIVED\n") + } + + p.writeRequestWithResponse(b, req, response, err) + if err != nil { + // skip frames runtime.Callers() and runtime.StackTrace() + b.WriteString(diag.StackTrace(2, 32)) + } else if p.includeBody { + err = writeRespBody(response, b) + } + log.Write(log.EventResponse, b.String()) + } + return response, err +} + +const redactedValue = "REDACTED" + +// getSanitizedURL returns a sanitized string for the provided url.URL +func getSanitizedURL(u url.URL, allowedQueryParams map[string]struct{}) string { + // redact applicable query params + qp := u.Query() + for k := range qp { + if _, ok := allowedQueryParams[strings.ToLower(k)]; !ok { + qp.Set(k, redactedValue) + } + } + u.RawQuery = qp.Encode() + return u.String() +} + +// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are +// not nil, then these are also written into the Buffer. +func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) { + // Write the request into the buffer. + fmt.Fprint(b, " "+req.Raw().Method+" "+getSanitizedURL(*req.Raw().URL, p.allowedQP)+"\n") + p.writeHeader(b, req.Raw().Header) + if resp != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " RESPONSE Status: "+resp.Status+"\n") + p.writeHeader(b, resp.Header) + } + if err != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") + } +} + +// formatHeaders appends an HTTP request's or response's header into a Buffer. +func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) { + if len(header) == 0 { + b.WriteString(" (no headers)\n") + return + } + keys := make([]string, 0, len(header)) + // Alphabetize the headers + for k := range header { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + value := header.Get(k) + // redact all header values not in the allow-list + if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok { + value = redactedValue + } + fmt.Fprintf(b, " %s: %+v\n", k, value) + } +} + +// returns true if the request/response body should be logged. +// this is determined by looking at the content-type header value. 
+func shouldLogBody(b *bytes.Buffer, contentType string) bool { + contentType = strings.ToLower(contentType) + if strings.HasPrefix(contentType, "text") || + strings.Contains(contentType, "json") || + strings.Contains(contentType, "xml") { + return true + } + fmt.Fprintf(b, " Skip logging body for %s\n", contentType) + return false +} + +// writes to a buffer, used for logging purposes +func writeReqBody(req *policy.Request, b *bytes.Buffer) error { + if req.Raw().Body == nil { + fmt.Fprint(b, " Request contained no body\n") + return nil + } + if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) { + return nil + } + body, err := io.ReadAll(req.Raw().Body) + if err != nil { + fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error()) + return err + } + if err := req.RewindBody(); err != nil { + return err + } + logBody(b, body) + return nil +} + +// writes to a buffer, used for logging purposes +func writeRespBody(resp *http.Response, b *bytes.Buffer) error { + ct := resp.Header.Get(shared.HeaderContentType) + if ct == "" { + fmt.Fprint(b, " Response contained no body\n") + return nil + } else if !shouldLogBody(b, ct) { + return nil + } + body, err := Payload(resp) + if err != nil { + fmt.Fprintf(b, " Failed to read response body: %s\n", err.Error()) + return err + } + if len(body) > 0 { + logBody(b, body) + } else { + fmt.Fprint(b, " Response contained no body\n") + } + return nil +} + +func logBody(b *bytes.Buffer, body []byte) { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprintln(b, string(body)) + fmt.Fprintln(b, " --------------------------------------------------------------------------------") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go new file mode 100644 index 00000000000..360a7f2118a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +type requestIDPolicy struct{} + +// NewRequestIDPolicy returns a policy that add the x-ms-client-request-id header +func NewRequestIDPolicy() policy.Policy { + return &requestIDPolicy{} +} + +func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) { + if req.Raw().Header.Get(shared.HeaderXMSClientRequestID) == "" { + id, err := uuid.New() + if err != nil { + return nil, err + } + req.Raw().Header.Set(shared.HeaderXMSClientRequestID, id.String()) + } + + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go new file mode 100644 index 00000000000..5f52ba75b45 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -0,0 +1,261 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "context" + "errors" + "io" + "math" + "math/rand" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +const ( + defaultMaxRetries = 3 +) + +func setDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = defaultMaxRetries + } else if o.MaxRetries < 0 { + o.MaxRetries = 0 + } + + // SDK guidelines specify the default MaxRetryDelay is 60 seconds + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 60 * time.Second + } else if o.MaxRetryDelay < 0 { + // not really an unlimited cap, but sufficiently large enough to be considered as such + o.MaxRetryDelay = math.MaxInt64 + } + if o.RetryDelay == 0 { + o.RetryDelay = 800 * time.Millisecond + } else if o.RetryDelay < 0 { + o.RetryDelay = 0 + } + if o.StatusCodes == nil { + // NOTE: if you change this list, you MUST update the docs in policy/policy.go + o.StatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + } +} + +func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 + pow := func(number int64, exponent int32) int64 { // pow is nested helper function + var result int64 = 1 + for n := int32(0); n < exponent; n++ { + result *= number + } + return result + } + + delay := time.Duration(pow(2, try)-1) * o.RetryDelay + + // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) + delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand + if delay > o.MaxRetryDelay { + delay = o.MaxRetryDelay + } + return delay +} + +// NewRetryPolicy creates a policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewRetryPolicy(o *policy.RetryOptions) policy.Policy { + if o == nil { + o = &policy.RetryOptions{} + } + p := &retryPolicy{options: *o} + return p +} + +type retryPolicy struct { + options policy.RetryOptions +} + +func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + options := p.options + // check if the retry options have been overridden for this call + if override := req.Raw().Context().Value(shared.CtxWithRetryOptionsKey{}); override != nil { + options = override.(policy.RetryOptions) + } + setDefaults(&options) + // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) + // When to retry: connection failure or temporary/timeout. + var rwbody *retryableRequestBody + if req.Body() != nil { + // wrap the body so we control when it's actually closed. + // do this outside the for loop so defers don't accumulate. + rwbody = &retryableRequestBody{body: req.Body()} + defer rwbody.realClose() + } + try := int32(1) + for { + resp = nil // reset + log.Writef(log.EventRetryPolicy, "=====> Try=%d", try) + + // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because + // the stream may not be at offset 0 when we first get it and we want the same behavior for the + // 1st try as for additional tries. 
+ err = req.RewindBody() + if err != nil { + return + } + // RewindBody() restores Raw().Body to its original state, so set our rewindable after + if rwbody != nil { + req.Raw().Body = rwbody + } + + if options.TryTimeout == 0 { + resp, err = req.Next() + } else { + // Set the per-try time for this particular retry operation and then Do the operation. + tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout) + clone := req.Clone(tryCtx) + resp, err = clone.Next() // Make the request + // if the body was already downloaded or there was an error it's safe to cancel the context now + if err != nil { + tryCancel() + } else if exported.PayloadDownloaded(resp) { + tryCancel() + } else { + // must cancel the context after the body has been read and closed + resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body} + } + } + if err == nil { + log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode) + } else { + log.Writef(log.EventRetryPolicy, "error %v", err) + } + + if ctxErr := req.Raw().Context().Err(); ctxErr != nil { + // don't retry if the parent context has been cancelled or its deadline exceeded + err = ctxErr + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + + // check if the error is not retriable + var nre errorinfo.NonRetriable + if errors.As(err, &nre) { + // the error says it's not retriable so don't retry + log.Writef(log.EventRetryPolicy, "non-retriable error %T", nre) + return + } + + if options.ShouldRetry != nil { + // a non-nil ShouldRetry overrides our HTTP status code check + if !options.ShouldRetry(resp, err) { + // predicate says we shouldn't retry + log.Write(log.EventRetryPolicy, "exit due to ShouldRetry") + return + } + } else if err == nil && !HasStatusCode(resp, options.StatusCodes...) { + // if there is no error and the response code isn't in the list of retry codes then we're done. + log.Write(log.EventRetryPolicy, "exit due to non-retriable status code") + return + } + + if try == options.MaxRetries+1 { + // max number of tries has been reached, don't sleep again + log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries) + return + } + + // use the delay from retry-after if available + delay := shared.RetryAfter(resp) + if delay <= 0 { + delay = calcDelay(options, try) + } else if delay > options.MaxRetryDelay { + // the retry-after delay exceeds the the cap so don't retry + log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay) + return + } + + // drain before retrying so nothing is leaked + Drain(resp) + + log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay) + select { + case <-time.After(delay): + try++ + case <-req.Raw().Context().Done(): + err = req.Raw().Context().Err() + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + } +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level. 
+func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context { + return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) +} + +// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser) + +// This struct is used when sending a body to the network +type retryableRequestBody struct { + body io.ReadSeeker // Seeking is required to support retries +} + +// Read reads a block of data from an inner stream and reports progress +func (b *retryableRequestBody) Read(p []byte) (n int, err error) { + return b.body.Read(p) +} + +func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) { + return b.body.Seek(offset, whence) +} + +func (b *retryableRequestBody) Close() error { + // We don't want the underlying transport to close the request body on transient failures so this is a nop. + // The retry policy closes the request body upon success. + return nil +} + +func (b *retryableRequestBody) realClose() error { + if c, ok := b.body.(io.Closer); ok { + return c.Close() + } + return nil +} + +// ********** The following type/methods implement the contextCancelReadCloser + +// contextCancelReadCloser combines an io.ReadCloser with a cancel func. +// it ensures the cancel func is invoked once the body has been read and closed. +type contextCancelReadCloser struct { + cf context.CancelFunc + body io.ReadCloser +} + +func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { + return rc.body.Read(p) +} + +func (rc *contextCancelReadCloser) Close() error { + err := rc.body.Close() + rc.cf() + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go new file mode 100644 index 00000000000..2abcdc576b6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "net/http" + "os" + "runtime" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +type telemetryPolicy struct { + telemetryValue string +} + +// NewTelemetryPolicy creates a telemetry policy object that adds telemetry information to outgoing HTTP requests. +// The format is [ ]azsdk-go-/ . +// Pass nil to accept the default values; this is the same as passing a zero-value options. 
+func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy { + if o == nil { + o = &policy.TelemetryOptions{} + } + tp := telemetryPolicy{} + if o.Disabled { + return &tp + } + b := &bytes.Buffer{} + // normalize ApplicationID + if o.ApplicationID != "" { + o.ApplicationID = strings.ReplaceAll(o.ApplicationID, " ", "/") + if len(o.ApplicationID) > 24 { + o.ApplicationID = o.ApplicationID[:24] + } + b.WriteString(o.ApplicationID) + b.WriteRune(' ') + } + b.WriteString(formatTelemetry(mod, ver)) + b.WriteRune(' ') + b.WriteString(platformInfo) + tp.telemetryValue = b.String() + return &tp +} + +func formatTelemetry(comp, ver string) string { + return fmt.Sprintf("azsdk-go-%s/%s", comp, ver) +} + +func (p telemetryPolicy) Do(req *policy.Request) (*http.Response, error) { + if p.telemetryValue == "" { + return req.Next() + } + // preserve the existing User-Agent string + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + p.telemetryValue = fmt.Sprintf("%s %s", p.telemetryValue, ua) + } + req.Raw().Header.Set(shared.HeaderUserAgent, p.telemetryValue) + return req.Next() +} + +// NOTE: the ONLY function that should write to this variable is this func +var platformInfo = func() string { + operatingSystem := runtime.GOOS // Default OS string + switch operatingSystem { + case "windows": + operatingSystem = os.Getenv("OS") // Get more specific OS information + case "linux": // accept default OS info + case "freebsd": // accept default OS info + } + return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) +}() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go new file mode 100644 index 00000000000..3d029a3d15b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -0,0 +1,327 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia = pollers.FinalStateVia + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation = pollers.FinalStateViaLocation + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. 
+ FinalStateViaOpLocation = pollers.FinalStateViaOpLocation +) + +// NewPollerOptions contains the optional parameters for NewPoller. +type NewPollerOptions[T any] struct { + // FinalStateVia contains the final-state-via value for the LRO. + FinalStateVia FinalStateVia + + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPoller creates a Poller based on the provided initial response. +func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + if options.Handler != nil { + return &Poller[T]{ + op: options.Handler, + resp: resp, + result: result, + }, nil + } + + defer resp.Body.Close() + // this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success). + // ideally the codegen should return an error if the initial response failed and not even create a poller. + if !poller.StatusCodeValid(resp) { + return nil, errors.New("the operation failed or was cancelled") + } + + // determine the polling method + var opr PollingHandler[T] + var err error + if async.Applicable(resp) { + // async poller must be checked first as it can also have a location header + opr, err = async.New[T](pl, resp, options.FinalStateVia) + } else if op.Applicable(resp) { + // op poller must be checked before loc as it can also have a location header + opr, err = op.New[T](pl, resp, options.FinalStateVia) + } else if loc.Applicable(resp) { + opr, err = loc.New[T](pl, resp) + } else if body.Applicable(resp) { + // must test body poller last as it's a subset of the other pollers. + // TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion) + opr, err = body.New[T](pl, resp) + } else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) { + // if we get here it means we have a 202 with no polling headers. + // for DELETE and POST this is a hard error per ARM RPC spec. + return nil, errors.New("response is missing polling URL") + } else { + opr, err = pollers.NewNopPoller[T](resp) + } + + if err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + resp: resp, + result: result, + }, nil +} + +// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken. +type NewPollerFromResumeTokenOptions[T any] struct { + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPollerFromResumeToken creates a Poller from a resume token string. 
+func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerFromResumeTokenOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + + if err := pollers.IsTokenValid[T](token); err != nil { + return nil, err + } + raw, err := pollers.ExtractToken(token) + if err != nil { + return nil, err + } + var asJSON map[string]interface{} + if err := json.Unmarshal(raw, &asJSON); err != nil { + return nil, err + } + + opr := options.Handler + // now rehydrate the poller based on the encoded poller type + if opr != nil { + log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) + } else if async.CanResume(asJSON) { + opr, _ = async.New[T](pl, nil, "") + } else if body.CanResume(asJSON) { + opr, _ = body.New[T](pl, nil) + } else if loc.CanResume(asJSON) { + opr, _ = loc.New[T](pl, nil) + } else if op.CanResume(asJSON) { + opr, _ = op.New[T](pl, nil, "") + } else { + return nil, fmt.Errorf("unhandled poller token %s", string(raw)) + } + if err := json.Unmarshal(raw, &opr); err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + result: result, + }, nil +} + +// PollingHandler[T] abstracts the differences among poller implementations. +type PollingHandler[T any] interface { + // Done returns true if the LRO has reached a terminal state. + Done() bool + + // Poll fetches the latest state of the LRO. + Poll(context.Context) (*http.Response, error) + + // Result is called once the LRO has reached a terminal state. It populates the out parameter + // with the result of the operation. + Result(ctx context.Context, out *T) error +} + +// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. +type Poller[T any] struct { + op PollingHandler[T] + resp *http.Response + err error + result *T + done bool +} + +// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method. +type PollUntilDoneOptions struct { + // Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second. + // Pass zero to accept the default value (30s). + Frequency time.Duration +} + +// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires. +// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals. +// options: pass nil to accept the default values. +// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might +// benefit from a shorter or longer duration. 
+func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) { + if options == nil { + options = &PollUntilDoneOptions{} + } + cp := *options + if cp.Frequency == 0 { + cp.Frequency = 30 * time.Second + } + + // skip the floor check when executing tests so they don't take so long + if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second { + return *new(T), errors.New("polling frequency minimum is one second") + } + + start := time.Now() + logPollUntilDoneExit := func(v interface{}) { + log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start)) + } + log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op) + if p.resp != nil { + // initial check for a retry-after header existing on the initial response + if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { + log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) + if err := shared.Delay(ctx, retryAfter); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } + } + // begin polling the endpoint until a terminal state is reached + for { + resp, err := p.Poll(ctx) + if err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + if p.Done() { + logPollUntilDoneExit("succeeded") + return p.Result(ctx) + } + d := cp.Frequency + if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { + log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String()) + d = retryAfter + } else { + log.Writef(log.EventLRO, "delay for %s", d.String()) + } + if err = shared.Delay(ctx, d); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } +} + +// Poll fetches the latest state of the LRO. It returns an HTTP response or error. +// If Poll succeeds, the poller's state is updated and the HTTP response is returned. +// If Poll fails, the poller's state is unmodified and the error is returned. +// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + if p.Done() { + // the LRO has reached a terminal state, don't poll again + return p.resp, nil + } + resp, err := p.op.Poll(ctx) + if err != nil { + return nil, err + } + p.resp = resp + return p.resp, nil +} + +// Done returns true if the LRO has reached a terminal state. +// Once a terminal state is reached, call Result(). +func (p *Poller[T]) Done() bool { + return p.op.Done() +} + +// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done. +// If the LRO completed successfully, a populated instance of T is returned. +// If the LRO failed or was canceled, an *azcore.ResponseError error is returned. +// Calling this on an LRO in a non-terminal state will return an error. +func (p *Poller[T]) Result(ctx context.Context) (T, error) { + if !p.Done() { + return *new(T), errors.New("poller is in a non-terminal state") + } + if p.done { + // the result has already been retrieved, return the cached value + if p.err != nil { + return *new(T), p.err + } + return *p.result, nil + } + err := p.op.Result(ctx, p.result) + var respErr *exported.ResponseError + if errors.As(err, &respErr) { + // the LRO failed. 
record the error + p.err = err + } else if err != nil { + // the call to Result failed, don't cache anything in this case + return *new(T), err + } + p.done = true + if p.err != nil { + return *new(T), p.err + } + return *p.result, nil +} + +// ResumeToken returns a value representing the poller that can be used to resume +// the LRO at a later time. ResumeTokens are unique per service operation. +// The token's format should be considered opaque and is subject to change. +// Calling this on an LRO in a terminal state will return an error. +func (p *Poller[T]) ResumeToken() (string, error) { + if p.Done() { + return "", errors.New("poller is in a terminal state") + } + tk, err := pollers.NewResumeToken[T](p.op) + if err != nil { + return "", err + } + return tk, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go new file mode 100644 index 00000000000..98e00718488 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -0,0 +1,248 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "mime/multipart" + "os" + "path" + "reflect" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when +// encoding/decoding a slice of bytes to/from a string. +type Base64Encoding int + +const ( + // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. + Base64StdFormat Base64Encoding = 0 + + // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. + Base64URLFormat Base64Encoding = 1 +) + +// NewRequest creates a new policy.Request with the specified input. +// The endpoint MUST be properly encoded before calling this function. +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) { + return exported.NewRequest(ctx, httpMethod, endpoint) +} + +// JoinPaths concatenates multiple URL path segments into one path, +// inserting path separation characters as required. JoinPaths will preserve +// query parameters in the root path +func JoinPaths(root string, paths ...string) string { + if len(paths) == 0 { + return root + } + + qps := "" + if strings.Contains(root, "?") { + splitPath := strings.Split(root, "?") + root, qps = splitPath[0], splitPath[1] + } + + p := path.Join(paths...) + // path.Join will remove any trailing slashes. + // if one was provided, preserve it. + if strings.HasSuffix(paths[len(paths)-1], "/") && !strings.HasSuffix(p, "/") { + p += "/" + } + + if qps != "" { + p = p + "?" + qps + } + + if strings.HasSuffix(root, "/") && strings.HasPrefix(p, "/") { + root = root[:len(root)-1] + } else if !strings.HasSuffix(root, "/") && !strings.HasPrefix(p, "/") { + p = "/" + p + } + return root + p +} + +// EncodeByteArray will base-64 encode the byte slice v. 
+func EncodeByteArray(v []byte, format Base64Encoding) string { + if format == Base64URLFormat { + return base64.RawURLEncoding.EncodeToString(v) + } + return base64.StdEncoding.EncodeToString(v) +} + +// MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody. +// The encoded value is treated as a JSON string. +func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error { + // send as a JSON string + encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format)) + return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON) +} + +// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. +func MarshalAsJSON(req *policy.Request, v interface{}) error { + if omit := os.Getenv("AZURE_SDK_GO_OMIT_READONLY"); omit == "true" { + v = cloneWithoutReadOnlyFields(v) + } + b, err := json.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON) +} + +// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. +func MarshalAsXML(req *policy.Request, v interface{}) error { + b, err := xml.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + // inclue the XML header as some services require it + b = []byte(xml.Header + string(b)) + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML) +} + +// SetMultipartFormData writes the specified keys/values as multi-part form +// fields with the specified value. File content must be specified as a ReadSeekCloser. +// All other values are treated as string values. +func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error { + body := bytes.Buffer{} + writer := multipart.NewWriter(&body) + + writeContent := func(fieldname, filename string, src io.Reader) error { + fd, err := writer.CreateFormFile(fieldname, filename) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, src); err != nil { + return err + } + return nil + } + + for k, v := range formData { + if rsc, ok := v.(io.ReadSeekCloser); ok { + if err := writeContent(k, k, rsc); err != nil { + return err + } + continue + } else if rscs, ok := v.([]io.ReadSeekCloser); ok { + for _, rsc := range rscs { + if err := writeContent(k, k, rsc); err != nil { + return err + } + } + continue + } + // ensure the value is in string format + s, ok := v.(string) + if !ok { + s = fmt.Sprintf("%v", v) + } + if err := writer.WriteField(k, s); err != nil { + return err + } + } + if err := writer.Close(); err != nil { + return err + } + return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType()) +} + +// SkipBodyDownload will disable automatic downloading of the response body. +func SkipBodyDownload(req *policy.Request) { + req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true}) +} + +// returns a clone of the object graph pointed to by v, omitting values of all read-only +// fields. if there are no read-only fields in the object graph, no clone is created. +func cloneWithoutReadOnlyFields(v interface{}) interface{} { + val := reflect.Indirect(reflect.ValueOf(v)) + if val.Kind() != reflect.Struct { + // not a struct, skip + return v + } + // first walk the graph to find any R/O fields. + // if there aren't any, skip cloning the graph. 
+ if !recursiveFindReadOnlyField(val) { + return v + } + return recursiveCloneWithoutReadOnlyFields(val) +} + +// returns true if any field in the object graph of val contains the `azure:"ro"` tag value +func recursiveFindReadOnlyField(val reflect.Value) bool { + t := val.Type() + // iterate over the fields, looking for the "azure" tag. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + aztag := field.Tag.Get("azure") + if azureTagIsReadOnly(aztag) { + return true + } else if reflect.Indirect(val.Field(i)).Kind() == reflect.Struct && recursiveFindReadOnlyField(reflect.Indirect(val.Field(i))) { + return true + } + } + return false +} + +// clones the object graph of val. all non-R/O properties are copied to the clone +func recursiveCloneWithoutReadOnlyFields(val reflect.Value) interface{} { + t := val.Type() + clone := reflect.New(t) + // iterate over the fields, looking for the "azure" tag. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + aztag := field.Tag.Get("azure") + if azureTagIsReadOnly(aztag) { + // omit from payload + continue + } + // clone field will receive the same value as the source field... + value := val.Field(i) + v := reflect.Indirect(value) + if v.IsValid() && v.Type() != reflect.TypeOf(time.Time{}) && v.Kind() == reflect.Struct { + // ...unless the source value is a struct, in which case we recurse to clone that struct. + // (We can't recursively clone time.Time because it contains unexported fields.) + c := recursiveCloneWithoutReadOnlyFields(v) + if field.Anonymous { + // NOTE: this does not handle the case of embedded fields of unexported struct types. + // this should be ok as we don't generate any code like this at present + value = reflect.Indirect(reflect.ValueOf(c)) + } else { + value = reflect.ValueOf(c) + } + } + reflect.Indirect(clone).Field(i).Set(value) + } + return clone.Interface() +} + +// returns true if the "azure" tag contains the option "ro" +func azureTagIsReadOnly(tag string) bool { + if tag == "" { + return false + } + parts := strings.Split(tag, ",") + for _, part := range parts { + if part == "ro" { + return true + } + } + return false +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go new file mode 100644 index 00000000000..d1f58e9e295 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -0,0 +1,135 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +func Payload(resp *http.Response) ([]byte, error) { + return exported.Payload(resp, nil) +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + return exported.HasStatusCode(resp, statusCodes...) +} + +// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v. 
+func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) error { + p, err := Payload(resp) + if err != nil { + return err + } + return DecodeByteArray(string(p), v, format) +} + +// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsJSON(resp *http.Response, v interface{}) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = json.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsXML(resp *http.Response, v interface{}) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = xml.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// Drain reads the response body to completion then closes it. The bytes read are discarded. +func Drain(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } +} + +// removeBOM removes any byte-order mark prefix from the payload if present. +func removeBOM(resp *http.Response) error { + _, err := exported.Payload(resp, &exported.PayloadOptions{ + BytesModifier: func(b []byte) []byte { + // UTF8 + return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) + }, + }) + if err != nil { + return err + } + return nil +} + +// DecodeByteArray will base-64 decode the provided string into v. +func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { + if len(s) == 0 { + return nil + } + payload := string(s) + if payload[0] == '"' { + // remove surrounding quotes + payload = payload[1 : len(payload)-1] + } + switch format { + case Base64StdFormat: + decoded, err := base64.StdEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + case Base64URLFormat: + // use raw encoding as URL format should not contain any '=' characters + decoded, err := base64.RawURLEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + default: + return fmt.Errorf("unrecognized byte array format: %d", format) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go new file mode 100644 index 00000000000..869bed51184 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +var defaultHTTPClient *http.Client + +func init() { + defaultTransport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + } + defaultHTTPClient = &http.Client{ + Transport: defaultTransport, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go new file mode 100644 index 00000000000..cadaef3d584 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package streaming contains helpers for streaming IO operations and progress reporting. +package streaming diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go new file mode 100644 index 00000000000..fbcd48311b8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package streaming + +import ( + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +type progress struct { + rc io.ReadCloser + rsc io.ReadSeekCloser + pr func(bytesTransferred int64) + offset int64 +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +// In addition to adding a Close method to an io.ReadSeeker, this can also be used to wrap an +// io.ReadSeekCloser with a no-op Close method to allow explicit control of when the io.ReedSeekCloser +// has its underlying stream closed. +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return exported.NopCloser(rs) +} + +// NewRequestProgress adds progress reporting to an HTTP request's body stream. +func NewRequestProgress(body io.ReadSeekCloser, pr func(bytesTransferred int64)) io.ReadSeekCloser { + return &progress{ + rc: body, + rsc: body, + pr: pr, + offset: 0, + } +} + +// NewResponseProgress adds progress reporting to an HTTP response's body stream. +func NewResponseProgress(body io.ReadCloser, pr func(bytesTransferred int64)) io.ReadCloser { + return &progress{ + rc: body, + rsc: nil, + pr: pr, + offset: 0, + } +} + +// Read reads a block of data from an inner stream and reports progress +func (p *progress) Read(b []byte) (n int, err error) { + n, err = p.rc.Read(b) + if err != nil && err != io.EOF { + return + } + p.offset += int64(n) + // Invokes the user's callback method to report progress + p.pr(p.offset) + return +} + +// Seek only expects a zero or from beginning. 
+func (p *progress) Seek(offset int64, whence int) (int64, error) { + // This should only ever be called with offset = 0 and whence = io.SeekStart + n, err := p.rsc.Seek(offset, whence) + if err == nil { + p.offset = int64(n) + } + return n, err +} + +// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it. +func (p *progress) Close() error { + return p.rc.Close() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go new file mode 100644 index 00000000000..faa98c9dc51 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package to contains various type-conversion helper functions. +package to diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go new file mode 100644 index 00000000000..e0e4817b90d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go @@ -0,0 +1,21 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package to + +// Ptr returns a pointer to the provided value. +func Ptr[T any](v T) *T { + return &v +} + +// SliceOfPtrs returns a slice of *T from the specified values. +func SliceOfPtrs[T any](vv ...T) []*T { + slc := make([]*T, len(vv)) + for i := range vv { + slc[i] = Ptr(vv[i]) + } + return slc +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go new file mode 100644 index 00000000000..80282d4ab0a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go @@ -0,0 +1,41 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tracing + +// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span will be processed and visualized by various backends. +type SpanKind int + +const ( + // SpanKindInternal indicates the span represents an internal operation within an application. + SpanKindInternal SpanKind = 1 + + // SpanKindServer indicates the span covers server-side handling of a request. + SpanKindServer SpanKind = 2 + + // SpanKindClient indicates the span describes a request to a remote service. + SpanKindClient SpanKind = 3 + + // SpanKindProducer indicates the span was created by a messaging producer. + SpanKindProducer SpanKind = 4 + + // SpanKindConsumer indicates the span was created by a messaging consumer. + SpanKindConsumer SpanKind = 5 +) + +// SpanStatus represents the status of a span. +type SpanStatus int + +const ( + // SpanStatusUnset is the default status code. + SpanStatusUnset SpanStatus = 0 + + // SpanStatusError indicates the operation contains an error. + SpanStatusError SpanStatus = 1 + + // SpanStatusOK indicates the operation completed successfully. 
+ SpanStatusOK SpanStatus = 2 +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go new file mode 100644 index 00000000000..75f757cedd3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go @@ -0,0 +1,168 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package tracing contains the definitions needed to support distributed tracing. +package tracing + +import ( + "context" +) + +// ProviderOptions contains the optional values when creating a Provider. +type ProviderOptions struct { + // for future expansion +} + +// NewProvider creates a new Provider with the specified values. +// - newTracerFn is the underlying implementation for creating Tracer instances +// - options contains optional values; pass nil to accept the default value +func NewProvider(newTracerFn func(name, version string) Tracer, options *ProviderOptions) Provider { + return Provider{ + newTracerFn: newTracerFn, + } +} + +// Provider is the factory that creates Tracer instances. +// It defaults to a no-op provider. +type Provider struct { + newTracerFn func(name, version string) Tracer +} + +// NewTracer creates a new Tracer for the specified name and version. +// - name - the name of the tracer object, typically the fully qualified name of the service client +// - version - the version of the module in which the service client resides +func (p Provider) NewTracer(name, version string) (tracer Tracer) { + if p.newTracerFn != nil { + tracer = p.newTracerFn(name, version) + } + return +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// TracerOptions contains the optional values when creating a Tracer. +type TracerOptions struct { + // for future expansion +} + +// NewTracer creates a Tracer with the specified values. +// - newSpanFn is the underlying implementation for creating Span instances +// - options contains optional values; pass nil to accept the default value +func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer { + return Tracer{ + newSpanFn: newSpanFn, + } +} + +// Tracer is the factory that creates Span instances. +type Tracer struct { + newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) +} + +// Start creates a new span and a context.Context that contains it. +// - ctx is the parent context for this span. If it contains a Span, the newly created span will be a child of that span, else it will be a root span +// - spanName identifies the span within a trace, it's typically the fully qualified API name +// - options contains optional values for the span, pass nil to accept any defaults +func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) { + if t.newSpanFn != nil { + return t.newSpanFn(ctx, spanName, options) + } + return ctx, Span{} +} + +// SpanOptions contains optional settings for creating a span. +type SpanOptions struct { + // Kind indicates the kind of Span. + Kind SpanKind + + // Attributes contains key-value pairs of attributes for the span. 
+ Attributes []Attribute +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// SpanImpl abstracts the underlying implementation for Span, +// allowing it to work with various tracing implementations. +// Any zero-values will have their default, no-op behavior. +type SpanImpl struct { + // End contains the implementation for the Span.End method. + End func() + + // SetAttributes contains the implementation for the Span.SetAttributes method. + SetAttributes func(...Attribute) + + // AddEvent contains the implementation for the Span.AddEvent method. + AddEvent func(string, ...Attribute) + + // AddError contains the implementation for the Span.AddError method. + AddError func(err error) + + // SetStatus contains the implementation for the Span.SetStatus method. + SetStatus func(SpanStatus, string) +} + +// NewSpan creates a Span with the specified implementation. +func NewSpan(impl SpanImpl) Span { + return Span{ + impl: impl, + } +} + +// Span is a single unit of a trace. A trace can contain multiple spans. +// A zero-value Span provides a no-op implementation. +type Span struct { + impl SpanImpl +} + +// End terminates the span and MUST be called before the span leaves scope. +// Any further updates to the span will be ignored after End is called. +func (s Span) End() { + if s.impl.End != nil { + s.impl.End() + } +} + +// SetAttributes sets the specified attributes on the Span. +// Any existing attributes with the same keys will have their values overwritten. +func (s Span) SetAttributes(attrs ...Attribute) { + if s.impl.SetAttributes != nil { + s.impl.SetAttributes(attrs...) + } +} + +// AddEvent adds a named event with an optional set of attributes to the span. +func (s Span) AddEvent(name string, attrs ...Attribute) { + if s.impl.AddEvent != nil { + s.impl.AddEvent(name, attrs...) + } +} + +// AddError adds the specified error event to the span. +func (s Span) AddError(err error) { + if s.impl.AddError != nil { + s.impl.AddError(err) + } +} + +// SetStatus sets the status on the span along with a description. +func (s Span) SetStatus(code SpanStatus, desc string) { + if s.impl.SetStatus != nil { + s.impl.SetStatus(code, desc) + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Attribute is a key-value pair. +type Attribute struct { + // Key is the name of the attribute. + Key string + + // Value is the attribute's value. + // Types that are natively supported include int64, float64, int, bool, string. + // Any other type will be formatted per rules of fmt.Sprintf("%v"). 
+ Value any +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md new file mode 100644 index 00000000000..cc8034cf7a8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -0,0 +1,409 @@ +# Release History + +## 1.3.0 (2023-05-09) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.3.0-beta.5 +* Renamed `NewOnBehalfOfCredentialFromCertificate` to `NewOnBehalfOfCredentialWithCertificate` +* Renamed `NewOnBehalfOfCredentialFromSecret` to `NewOnBehalfOfCredentialWithSecret` + +### Other Changes +* Upgraded to MSAL v1.0.0 + +## 1.3.0-beta.5 (2023-04-11) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.3.0-beta.4 +* Moved `NewWorkloadIdentityCredential()` parameters into `WorkloadIdentityCredentialOptions`. + The constructor now reads default configuration from environment variables set by the Azure + workload identity webhook by default. + ([#20478](https://github.com/Azure/azure-sdk-for-go/pull/20478)) +* Removed CAE support. It will return in v1.4.0-beta.1 + ([#20479](https://github.com/Azure/azure-sdk-for-go/pull/20479)) + +### Bugs Fixed +* Fixed an issue in `DefaultAzureCredential` that could cause the managed identity endpoint check to fail in rare circumstances. + +## 1.3.0-beta.4 (2023-03-08) + +### Features Added +* Added `WorkloadIdentityCredentialOptions.AdditionallyAllowedTenants` and `.DisableInstanceDiscovery` + +### Bugs Fixed +* Credentials now synchronize within `GetToken()` so a single instance can be shared among goroutines + ([#20044](https://github.com/Azure/azure-sdk-for-go/issues/20044)) + +### Other Changes +* Upgraded dependencies + +## 1.2.2 (2023-03-07) + +### Other Changes +* Upgraded dependencies + +## 1.3.0-beta.3 (2023-02-07) + +### Features Added +* By default, credentials set client capability "CP1" to enable support for + [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation). + This indicates to Azure Active Directory that your application can handle CAE claims challenges. + You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true". +* `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login + prompt with a username ([#15599](https://github.com/Azure/azure-sdk-for-go/pull/15599)) +* Service principal and user credentials support ADFS authentication on Azure Stack. + Specify "adfs" as the credential's tenant. +* Applications running in private or disconnected clouds can prevent credentials from + requesting Azure AD instance metadata by setting the `DisableInstanceDiscovery` + field on credential options. +* Many credentials can now be configured to authenticate in multiple tenants. The + options types for these credentials have an `AdditionallyAllowedTenants` field + that specifies additional tenants in which the credential may authenticate. 
+ +## 1.3.0-beta.2 (2023-01-10) + +### Features Added +* Added `OnBehalfOfCredential` to support the on-behalf-of flow + ([#16642](https://github.com/Azure/azure-sdk-for-go/issues/16642)) + +### Bugs Fixed +* `AzureCLICredential` reports token expiration in local time (should be UTC) + +### Other Changes +* `AzureCLICredential` imposes its default timeout only when the `Context` + passed to `GetToken()` has no deadline +* Added `NewCredentialUnavailableError()`. This function constructs an error indicating + a credential can't authenticate and an encompassing `ChainedTokenCredential` should + try its next credential, if any. + +## 1.3.0-beta.1 (2022-12-13) + +### Features Added +* `WorkloadIdentityCredential` and `DefaultAzureCredential` support + Workload Identity Federation on Kubernetes. `DefaultAzureCredential` + support requires environment variable configuration as set by the + Workload Identity webhook. + ([#15615](https://github.com/Azure/azure-sdk-for-go/issues/15615)) + +## 1.2.0 (2022-11-08) + +### Other Changes +* This version includes all fixes and features from 1.2.0-beta.* + +## 1.2.0-beta.3 (2022-10-11) + +### Features Added +* `ManagedIdentityCredential` caches tokens in memory + +### Bugs Fixed +* `ClientCertificateCredential` sends only the leaf cert for SNI authentication + +## 1.2.0-beta.2 (2022-08-10) + +### Features Added +* Added `ClientAssertionCredential` to enable applications to authenticate + with custom client assertions + +### Other Changes +* Updated AuthenticationFailedError with links to TROUBLESHOOTING.md for relevant errors +* Upgraded `microsoft-authentication-library-for-go` requirement to v0.6.0 + +## 1.2.0-beta.1 (2022-06-07) + +### Features Added +* `EnvironmentCredential` reads certificate passwords from `AZURE_CLIENT_CERTIFICATE_PASSWORD` + ([#17099](https://github.com/Azure/azure-sdk-for-go/pull/17099)) + +## 1.1.0 (2022-06-07) + +### Features Added +* `ClientCertificateCredential` and `ClientSecretCredential` support ESTS-R. First-party + applications can set environment variable `AZURE_REGIONAL_AUTHORITY_NAME` with a + region name. + ([#15605](https://github.com/Azure/azure-sdk-for-go/issues/15605)) + +## 1.0.1 (2022-06-07) + +### Other Changes +* Upgrade `microsoft-authentication-library-for-go` requirement to v0.5.1 + ([#18176](https://github.com/Azure/azure-sdk-for-go/issues/18176)) + +## 1.0.0 (2022-05-12) + +### Features Added +* `DefaultAzureCredential` reads environment variable `AZURE_CLIENT_ID` for the + client ID of a user-assigned managed identity + ([#17293](https://github.com/Azure/azure-sdk-for-go/pull/17293)) + +### Breaking Changes +* Removed `AuthorizationCodeCredential`. Use `InteractiveBrowserCredential` instead + to authenticate a user with the authorization code flow. +* Instances of `AuthenticationFailedError` are now returned by pointer. +* `GetToken()` returns `azcore.AccessToken` by value + +### Bugs Fixed +* `AzureCLICredential` panics after receiving an unexpected error type + ([#17490](https://github.com/Azure/azure-sdk-for-go/issues/17490)) + +### Other Changes +* `GetToken()` returns an error when the caller specifies no scope +* Updated to the latest versions of `golang.org/x/crypto`, `azcore` and `internal` + +## 0.14.0 (2022-04-05) + +### Breaking Changes +* This module now requires Go 1.18 +* Removed `AuthorityHost`. 
Credentials are now configured for sovereign or private + clouds with the API in `azcore/cloud`, for example: + ```go + // before + opts := azidentity.ClientSecretCredentialOptions{AuthorityHost: azidentity.AzureGovernment} + cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts) + + // after + import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + + opts := azidentity.ClientSecretCredentialOptions{} + opts.Cloud = cloud.AzureGovernment + cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts) + ``` + +## 0.13.2 (2022-03-08) + +### Bugs Fixed +* Prevented a data race in `DefaultAzureCredential` and `ChainedTokenCredential` + ([#17144](https://github.com/Azure/azure-sdk-for-go/issues/17144)) + +### Other Changes +* Upgraded App Service managed identity version from 2017-09-01 to 2019-08-01 + ([#17086](https://github.com/Azure/azure-sdk-for-go/pull/17086)) + +## 0.13.1 (2022-02-08) + +### Features Added +* `EnvironmentCredential` supports certificate SNI authentication when + `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN` is "true". + ([#16851](https://github.com/Azure/azure-sdk-for-go/pull/16851)) + +### Bugs Fixed +* `ManagedIdentityCredential.GetToken()` now returns an error when configured for + a user assigned identity in Azure Cloud Shell (which doesn't support such identities) + ([#16946](https://github.com/Azure/azure-sdk-for-go/pull/16946)) + +### Other Changes +* `NewDefaultAzureCredential()` logs non-fatal errors. These errors are also included in the + error returned by `DefaultAzureCredential.GetToken()` when it's unable to acquire a token + from any source. ([#15923](https://github.com/Azure/azure-sdk-for-go/issues/15923)) + +## 0.13.0 (2022-01-11) + +### Breaking Changes +* Replaced `AuthenticationFailedError.RawResponse()` with a field having the same name +* Unexported `CredentialUnavailableError` +* Instances of `ChainedTokenCredential` will now skip looping through the list of source credentials and re-use the first successful credential on subsequent calls to `GetToken`. + * If `ChainedTokenCredentialOptions.RetrySources` is true, `ChainedTokenCredential` will continue to try all of the originally provided credentials each time the `GetToken` method is called. + * `ChainedTokenCredential.successfulCredential` will contain a reference to the last successful credential. + * `DefaultAzureCredenial` will also re-use the first successful credential on subsequent calls to `GetToken`. + * `DefaultAzureCredential.chain.successfulCredential` will also contain a reference to the last successful credential. + +### Other Changes +* `ManagedIdentityCredential` no longer probes IMDS before requesting a token + from it. Also, an error response from IMDS no longer disables a credential + instance. Following an error, a credential instance will continue to send + requests to IMDS as necessary. +* Adopted MSAL for user and service principal authentication +* Updated `azcore` requirement to 0.21.0 + +## 0.12.0 (2021-11-02) +### Breaking Changes +* Raised minimum go version to 1.16 +* Removed `NewAuthenticationPolicy()` from credentials. Clients should instead use azcore's + `runtime.NewBearerTokenPolicy()` to construct a bearer token authorization policy. 
+* The `AuthorityHost` field in credential options structs is now a custom type, + `AuthorityHost`, with underlying type `string` +* `NewChainedTokenCredential` has a new signature to accommodate a placeholder + options struct: + ```go + // before + cred, err := NewChainedTokenCredential(credA, credB) + + // after + cred, err := NewChainedTokenCredential([]azcore.TokenCredential{credA, credB}, nil) + ``` +* Removed `ExcludeAzureCLICredential`, `ExcludeEnvironmentCredential`, and `ExcludeMSICredential` + from `DefaultAzureCredentialOptions` +* `NewClientCertificateCredential` requires a `[]*x509.Certificate` and `crypto.PrivateKey` instead of + a path to a certificate file. Added `ParseCertificates` to simplify getting these in common cases: + ```go + // before + cred, err := NewClientCertificateCredential("tenant", "client-id", "/cert.pem", nil) + + // after + certData, err := os.ReadFile("/cert.pem") + certs, key, err := ParseCertificates(certData, password) + cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, nil) + ``` +* Removed `InteractiveBrowserCredentialOptions.ClientSecret` and `.Port` +* Removed `AADAuthenticationFailedError` +* Removed `id` parameter of `NewManagedIdentityCredential()`. User assigned identities are now + specified by `ManagedIdentityCredentialOptions.ID`: + ```go + // before + cred, err := NewManagedIdentityCredential("client-id", nil) + // or, for a resource ID + opts := &ManagedIdentityCredentialOptions{ID: ResourceID} + cred, err := NewManagedIdentityCredential("/subscriptions/...", opts) + + // after + clientID := ClientID("7cf7db0d-...") + opts := &ManagedIdentityCredentialOptions{ID: clientID} + // or, for a resource ID + resID: ResourceID("/subscriptions/...") + opts := &ManagedIdentityCredentialOptions{ID: resID} + cred, err := NewManagedIdentityCredential(opts) + ``` +* `DeviceCodeCredentialOptions.UserPrompt` has a new type: `func(context.Context, DeviceCodeMessage) error` +* Credential options structs now embed `azcore.ClientOptions`. In addition to changing literal initialization + syntax, this change renames `HTTPClient` fields to `Transport`. +* Renamed `LogCredential` to `EventCredential` +* `AzureCLICredential` no longer reads the environment variable `AZURE_CLI_PATH` +* `NewManagedIdentityCredential` no longer reads environment variables `AZURE_CLIENT_ID` and + `AZURE_RESOURCE_ID`. Use `ManagedIdentityCredentialOptions.ID` instead. +* Unexported `AuthenticationFailedError` and `CredentialUnavailableError` structs. In their place are two + interfaces having the same names. 
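+
+For example, because options structs now embed `azcore.ClientOptions`, a custom transport moves from the old `HTTPClient` field to `Transport` (sketch; `myClient` stands in for any transporter):
+```go
+// before
+opts := azidentity.ClientSecretCredentialOptions{HTTPClient: myClient}
+
+// after
+opts := azidentity.ClientSecretCredentialOptions{}
+opts.Transport = myClient
+cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts)
+```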
+ +### Bugs Fixed +* `AzureCLICredential.GetToken` no longer mutates its `opts.Scopes` + +### Features Added +* Added connection configuration options to `DefaultAzureCredentialOptions` +* `AuthenticationFailedError.RawResponse()` returns the HTTP response motivating the error, + if available + +### Other Changes +* `NewDefaultAzureCredential()` returns `*DefaultAzureCredential` instead of `*ChainedTokenCredential` +* Added `TenantID` field to `DefaultAzureCredentialOptions` and `AzureCLICredentialOptions` + +## 0.11.0 (2021-09-08) +### Breaking Changes +* Unexported `AzureCLICredentialOptions.TokenProvider` and its type, + `AzureCLITokenProvider` + +### Bug Fixes +* `ManagedIdentityCredential.GetToken` returns `CredentialUnavailableError` + when IMDS has no assigned identity, signaling `DefaultAzureCredential` to + try other credentials + + +## 0.10.0 (2021-08-30) +### Breaking Changes +* Update based on `azcore` refactor [#15383](https://github.com/Azure/azure-sdk-for-go/pull/15383) + +## 0.9.3 (2021-08-20) + +### Bugs Fixed +* `ManagedIdentityCredential.GetToken` no longer mutates its `opts.Scopes` + +### Other Changes +* Bumps version of `azcore` to `v0.18.1` + + +## 0.9.2 (2021-07-23) +### Features Added +* Adding support for Service Fabric environment in `ManagedIdentityCredential` +* Adding an option for using a resource ID instead of client ID in `ManagedIdentityCredential` + + +## 0.9.1 (2021-05-24) +### Features Added +* Add LICENSE.txt and bump version information + + +## 0.9.0 (2021-05-21) +### Features Added +* Add support for authenticating in Azure Stack environments +* Enable user assigned identities for the IMDS scenario in `ManagedIdentityCredential` +* Add scope to resource conversion in `GetToken()` on `ManagedIdentityCredential` + + +## 0.8.0 (2021-01-20) +### Features Added +* Updating documentation + + +## 0.7.1 (2021-01-04) +### Features Added +* Adding port option to `InteractiveBrowserCredential` + + +## 0.7.0 (2020-12-11) +### Features Added +* Add `redirectURI` parameter back to authentication code flow + + +## 0.6.1 (2020-12-09) +### Features Added +* Updating query parameter in `ManagedIdentityCredential` and updating datetime string for parsing managed identity access tokens. + + +## 0.6.0 (2020-11-16) +### Features Added +* Remove `RedirectURL` parameter from auth code flow to align with the MSAL implementation which relies on the native client redirect URL. + + +## 0.5.0 (2020-10-30) +### Features Added +* Flattening credential options + + +## 0.4.3 (2020-10-21) +### Features Added +* Adding Azure Arc support in `ManagedIdentityCredential` + + +## 0.4.2 (2020-10-16) +### Features Added +* Typo fixes + + +## 0.4.1 (2020-10-16) +### Features Added +* Ensure authority hosts are only HTTPs + + +## 0.4.0 (2020-10-16) +### Features Added +* Adding options structs for credentials + + +## 0.3.0 (2020-10-09) +### Features Added +* Update `DeviceCodeCredential` callback + + +## 0.2.2 (2020-10-09) +### Features Added +* Add `AuthorizationCodeCredential` + + +## 0.2.1 (2020-10-06) +### Features Added +* Add `InteractiveBrowserCredential` + + +## 0.2.0 (2020-09-11) +### Features Added +* Refactor `azidentity` on top of `azcore` refactor +* Updated policies to conform to `policy.Policy` interface changes. +* Updated non-retriable errors to conform to `azcore.NonRetriableError`. +* Fixed calls to `Request.SetBody()` to include content type. +* Switched endpoints to string types and removed extra parsing code. 
+ + +## 0.1.1 (2020-09-02) +### Features Added +* Add `AzureCLICredential` to `DefaultAzureCredential` chain + + +## 0.1.0 (2020-07-23) +### Features Added +* Initial Release. Azure Identity library that provides Azure Active Directory token authentication support for the SDK. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt new file mode 100644 index 00000000000..48ea6616b5b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md new file mode 100644 index 00000000000..4ac53eb7b27 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md @@ -0,0 +1,307 @@ +# Migrating from autorest/adal to azidentity + +`azidentity` provides Azure Active Directory (Azure AD) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. + +This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`. + +## Table of contents + +- [Acquire a token](#acquire-a-token) +- [Client certificate authentication](#client-certificate-authentication) +- [Client secret authentication](#client-secret-authentication) +- [Configuration](#configuration) +- [Device code authentication](#device-code-authentication) +- [Managed identity](#managed-identity) +- [Use azidentity credentials with older packages](#use-azidentity-credentials-with-older-packages) + +## Configuration + +### `autorest/adal` + +Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires an Azure AD endpoint and tenant: + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +oauthCfg, err := adal.NewOAuthConfig("https://login.chinacloudapi.cn", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://management.chinacloudapi.cn/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) +``` + +### `azidentity` + +A credential instance can acquire tokens for any audience. 
The audience for each token is determined by the client requesting it. Credentials require endpoint configuration only for sovereign or private clouds. The `azcore/cloud` package has predefined configuration for sovereign clouds such as Azure China: + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" +) + +clientOpts := azcore.ClientOptions{Cloud: cloud.AzureChina} + +cred, err := azidentity.NewClientSecretCredential( + tenantID, clientID, secret, &azidentity.ClientSecretCredentialOptions{ClientOptions: clientOpts}, +) +handle(err) +``` + +## Client secret authentication + +### `autorest/adal` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://management.azure.com/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +## Client certificate authentication + +### `autorest/adal` + +```go +import ( + "os" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) +certData, err := os.ReadFile("./example.pfx") +handle(err) + +certificate, rsaPrivateKey, err := decodePkcs12(certData, "") +handle(err) + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, clientID, certificate, rsaPrivateKey, "https://management.azure.com/", +) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +certData, err := os.ReadFile("./example.pfx") +handle(err) + +certs, key, err := azidentity.ParseCertificates(certData, nil) +handle(err) + +cred, err = azidentity.NewClientCertificateCredential(tenantID, clientID, certs, key, nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +## Managed identity + +### `autorest/adal` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/", nil) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := 
azidentity.NewManagedIdentityCredential(nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +### User-assigned identities + +`autorest/adal`: + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +opts := &adal.ManagedIdentityOptions{ClientID: "..."} +spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/") +handle(err) +``` + +`azidentity`: + +```go +import "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + +opts := azidentity.ManagedIdentityCredentialOptions{ID: azidentity.ClientID("...")} +cred, err := azidentity.NewManagedIdentityCredential(&opts) +handle(err) +``` + +## Device code authentication + +### `autorest/adal` + +```go +import ( + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +oauthClient := &http.Client{} +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) +resource := "https://management.azure.com/" +deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthCfg, clientID, resource) +handle(err) + +// display instructions, wait for the user to authenticate +fmt.Println(*deviceCode.Message) +token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) +handle(err) + +spt, err := adal.NewServicePrincipalTokenFromManualToken(*oauthCfg, clientID, resource, *token) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := azidentity.NewDeviceCodeCredential(nil) +handle(err) + +client, err := armsubscriptions.NewSubscriptionsClient(cred, nil) +handle(err) +``` + +`azidentity.DeviceCodeCredential` will guide a user through authentication, printing instructions to the console by default. The user prompt is customizable. For more information, see the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential). + +## Acquire a token + +### `autorest/adal` + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://vault.azure.net", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) + +err = spt.Refresh() +if err == nil { + token := spt.Token +} +``` + +### `azidentity` + +In ordinary usage, application code doesn't need to request tokens from credentials directly. Azure SDK clients handle token acquisition and refreshing internally. However, applications may call `GetToken()` to do so. All credential types have this method. + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +tk, err := cred.GetToken( + context.TODO(), policy.TokenRequestOptions{Scopes: []string{"https://vault.azure.net/.default"}}, +) +if err == nil { + token := tk.Token +} +``` + +Note that `azidentity` credentials use the Azure AD v2.0 endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. 
For more information, see [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent). + +## Use azidentity credentials with older packages + +The [azidext module](https://pkg.go.dev/github.com/jongio/azidext/go/azidext) provides an adapter for `azidentity` credential types. The adapter enables using the credential types with older Azure SDK clients. For example: + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/jongio/azidext/go/azidext" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = azidext.NewTokenCredentialAdapter(cred, []string{"https://management.azure.com//.default"}) +``` + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FMIGRATION.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md new file mode 100644 index 00000000000..da0baa9add3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -0,0 +1,243 @@ +# Azure Identity Client Module for Go + +The Azure Identity module provides Azure Active Directory (Azure AD) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) +| [Azure Active Directory documentation](https://docs.microsoft.com/azure/active-directory/) +| [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity) + +# Getting started + +## Install the module + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Install the Azure Identity module: + +```sh +go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + +## Prerequisites + +- an [Azure subscription](https://azure.microsoft.com/free/) +- Go 1.18 + +### Authenticating during local development + +When debugging and executing code locally, developers typically use their own accounts to authenticate calls to Azure services. The `azidentity` module supports authenticating through developer tools to simplify local development. + +#### Authenticating via the Azure CLI + +`DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user +signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user. + +When no default browser is available, `az login` will use the device code +authentication flow. This can also be selected manually by running `az login --use-device-code`. + +## Key concepts + +### Credentials + +A credential is a type which contains or can obtain the data needed for a +service client to authenticate requests. Service clients across the Azure SDK +accept a credential instance when they are constructed, and use that credential +to authenticate requests. + +The `azidentity` module focuses on OAuth authentication with Azure Active +Directory (AAD). 
It offers a variety of credential types capable of acquiring +an Azure AD access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. + +### DefaultAzureCredential + +`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: + +![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg) + +1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate. +1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity. +1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. +1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. + +> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types. + +## Managed Identity + +`DefaultAzureCredential` and `ManagedIdentityCredential` support +[managed identity authentication](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) +in any hosting environment which supports managed identities, such as (this list is not exhaustive): +* [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity) +* [Azure Arc](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication) +* [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/msi-authorization) +* [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/use-managed-identity) +* [Azure Service Fabric](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity) +* [Azure Virtual Machines](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token) + +## Examples + +- [Authenticate with DefaultAzureCredential](#authenticate-with-defaultazurecredential "Authenticate with DefaultAzureCredential") +- [Define a custom authentication flow with ChainedTokenCredential](#define-a-custom-authentication-flow-with-chainedtokencredential "Define a custom authentication flow with ChainedTokenCredential") +- [Specify a user-assigned managed identity for DefaultAzureCredential](#specify-a-user-assigned-managed-identity-for-defaultazurecredential) + +### Authenticate with DefaultAzureCredential + +This example demonstrates authenticating a client from the `armresources` module with `DefaultAzureCredential`. 
+ +```go +cred, err := azidentity.NewDefaultAzureCredential(nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", cred, nil) +``` + +### Specify a user-assigned managed identity for DefaultAzureCredential + +To configure `DefaultAzureCredential` to authenticate a user-assigned managed identity, set the environment variable `AZURE_CLIENT_ID` to the identity's client ID. + +### Define a custom authentication flow with `ChainedTokenCredential` + +`DefaultAzureCredential` is generally the quickest way to get started developing apps for Azure. For more advanced scenarios, `ChainedTokenCredential` links multiple credential instances to be tried sequentially when authenticating. It will try each chained credential in turn until one provides a token or fails to authenticate due to an error. + +The following example demonstrates creating a credential, which will attempt to authenticate using managed identity. It will fall back to authenticating via the Azure CLI when a managed identity is unavailable. + +```go +managed, err := azidentity.NewManagedIdentityCredential(nil) +if err != nil { + // handle error +} +azCLI, err := azidentity.NewAzureCLICredential(nil) +if err != nil { + // handle error +} +chain, err := azidentity.NewChainedTokenCredential([]azcore.TokenCredential{managed, azCLI}, nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) +``` + +## Credential Types + +### Authenticating Azure Hosted Applications + +|Credential|Usage +|-|- +|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps +|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials +|[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables +|[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource +|[WorkloadIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential)|Authenticate a workload identity on Kubernetes + +### Authenticating Service Principals + +|Credential|Usage +|-|- +|[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion +|[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate +|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret + +### Authenticating Users + +|Credential|Usage +|-|- +|[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser +|[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI 
+|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password + +### Authenticating via Development Tools + +|Credential|Usage +|-|- +|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI + +## Environment Variables + +`DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. Each type of authentication requires values for specific variables: + +#### Service principal with secret + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_SECRET`|one of the application's client secrets + +#### Service principal with certificate + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key +|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any + +#### Username and password + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_USERNAME`|a username (usually an email address) +|`AZURE_PASSWORD`|that user's password + +Configuration is attempted in the above order. For example, if values for a +client secret and certificate are both present, the client secret will be used. + +## Troubleshooting + +### Error Handling + +Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot). + +For more details on handling specific Azure Active Directory errors please refer to the +Azure Active Directory +[error code documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes). + +### Logging + +This module uses the classification-based logging implementation in `azcore`. To enable console logging for all SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. Use the `azcore/log` package to control log event output or to enable logs for `azidentity` only. For example: +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + +Credentials log basic information only, such as `GetToken` success or failure and errors. These log entries don't contain authentication secrets but may contain sensitive information. + +## Next steps + +Client and management modules listed on the [Azure SDK releases page](https://azure.github.io/azure-sdk/releases/latest/go.html) support authenticating with `azidentity` credential types. You can learn more about using these libraries in their documentation, which is linked from the release page. + +## Provide Feedback + +If you encounter bugs or have suggestions, please +[open an issue](https://github.com/Azure/azure-sdk-for-go/issues). + +## Contributing + +This project welcomes contributions and suggestions. 
Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md new file mode 100644 index 00000000000..7b7515ebac2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -0,0 +1,205 @@ +# Troubleshoot Azure Identity authentication issues + +This troubleshooting guide covers failure investigation techniques, common errors for the credential types in the `azidentity` module, and mitigation steps to resolve these errors. + +## Table of contents + +- [Handle azidentity errors](#handle-azidentity-errors) + - [Permission issues](#permission-issues) +- [Find relevant information in errors](#find-relevant-information-in-errors) +- [Enable and configure logging](#enable-and-configure-logging) +- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues) +- [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) +- [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) +- [Troubleshoot EnvironmentCredential authentication issues](#troubleshoot-environmentcredential-authentication-issues) +- [Troubleshoot ManagedIdentityCredential authentication issues](#troubleshoot-managedidentitycredential-authentication-issues) + - [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity) + - [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity) + - [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity) +- [Troubleshoot UsernamePasswordCredential authentication issues](#troubleshoot-usernamepasswordcredential-authentication-issues) +- [Troubleshoot WorkloadIdentityCredential authentication issues](#troubleshoot-workloadidentitycredential-authentication-issues) +- [Get additional help](#get-additional-help) + +## Handle azidentity errors + +Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. 
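For example, here is a minimal, hedged sketch of detecting such a failure, assuming `client` is an `armresources.ResourceGroupsClient` constructed with an `azidentity` credential as in the module README, and the resource group name is a hypothetical placeholder:

```go
// Sketch only: "client" is an armresources.ResourceGroupsClient built with an
// azidentity credential; "example-resource-group" is a placeholder.
_, err := client.Get(context.TODO(), "example-resource-group", nil)
if err != nil {
    var authErr *azidentity.AuthenticationFailedError
    if errors.As(err, &authErr) {
        // the credential failed to acquire a token; authErr wraps the Azure AD response
        log.Printf("authentication failed: %v", authErr)
        return
    }
    // any other error, e.g. the service returned 401 or 403 because the caller lacks permission
    log.Printf("request failed: %v", err)
}
```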
Authentication errors include a description of the failure and possibly an error message from Azure Active Directory (Azure AD). Depending on the application, these errors may or may not be recoverable. + +### Permission issues + +Service client errors with a status code of 401 or 403 often indicate that authentication succeeded but the caller doesn't have permission to access the specified API. Check the service documentation to determine which RBAC roles are needed for the request, and ensure the authenticated user or service principal has the appropriate role assignments. + +## Find relevant information in errors + +Authentication errors can include responses from Azure AD and often contain information helpful in diagnosis. Consider the following error message: + +``` +ClientSecretCredential authentication failed +POST https://login.microsoftonline.com/3c631bb7-a9f7-4343-a5ba-a615913/oauth2/v2.0/token +-------------------------------------------------------------------------------- +RESPONSE 401 Unauthorized +-------------------------------------------------------------------------------- +{ + "error": "invalid_client", + "error_description": "AADSTS7000215: Invalid client secret provided. Ensure the secret being sent in the request is the client secret value, not the client secret ID, for a secret added to app '86be4c01-505b-45e9-bfc0-9b825fd84'.\r\nTrace ID: 03da4b8e-5ffe-48ca-9754-aff4276f0100\r\nCorrelation ID: 7b12f9bb-2eef-42e3-ad75-eee69ec9088d\r\nTimestamp: 2022-03-02 18:25:26Z", + "error_codes": [ + 7000215 + ], + "timestamp": "2022-03-02 18:25:26Z", + "trace_id": "03da4b8e-5ffe-48ca-9754-aff4276f0100", + "correlation_id": "7b12f9bb-2eef-42e3-ad75-eee69ec9088d", + "error_uri": "https://login.microsoftonline.com/error?code=7000215" +} +-------------------------------------------------------------------------------- +``` + +This error contains several pieces of information: + +- __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. + +- __Azure AD Error Code and Message__: The error code and message returned by Azure AD. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes#aadsts-error-codes) has more information on AADSTS error codes. + +- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Azure AD failures. + +### Enable and configure logging + +`azidentity` provides the same logging capabilities as the rest of the Azure SDK. The simplest way to see the logs to help debug authentication issues is to print credential logs to the console. +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + +## Troubleshoot DefaultAzureCredential authentication issues + +| Error |Description| Mitigation | +|---|---|---| +|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
  • [Enable logging](#enable-and-configure-logging) to get further diagnostic information.
  • Consult the troubleshooting guide for underlying credential types for more information.
    • [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues)
    • [ManagedIdentityCredential](#troubleshoot-managedidentitycredential-authentication-issues)
    • [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)
    | +|Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized) or 403 (Forbidden) status code|
    • [Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token.
    • If an unexpected credential is returning a token, check application configuration such as environment variables.
    • Ensure the correct role is assigned to the authenticated identity. For example, a service-specific role rather than the subscription Owner role.
    | + +## Troubleshoot EnvironmentCredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Missing or incomplete environment variable configuration|A valid combination of environment variables wasn't set|Ensure the appropriate environment variables are set for the intended authentication method as described in the [module documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)| + + +## Troubleshoot ClientSecretCredential authentication issues + +| Error Code | Issue | Mitigation | +|---|---|---| +|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| + + +## Troubleshoot ClientCertificateCredential authentication issues + +| Error Code | Description | Mitigation | +|---|---|---| +|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| + + +## Troubleshoot UsernamePasswordCredential authentication issues + +| Error Code | Issue | Mitigation | +|---|---|---| +|AADSTS50126|The provided username or password is invalid.|Ensure the username and password provided to the credential constructor are valid.| + + +## Troubleshoot ManagedIdentityCredential authentication issues + +`ManagedIdentityCredential` is designed to work on a variety of Azure hosts support managed identity. Configuration and troubleshooting vary from host to host. The below table lists the Azure hosts that can be assigned a managed identity and are supported by `ManagedIdentityCredential`. 
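Before the host-specific guidance in that table, here is a minimal, hedged construction sketch; the user-assigned client ID shown is a hypothetical placeholder:

```go
// System-assigned managed identity: no options are required.
cred, err := azidentity.NewManagedIdentityCredential(nil)
if err != nil {
    // handle error
}

// User-assigned managed identity: identify it by client ID (placeholder value).
userAssigned, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
    ID: azidentity.ClientID("00000000-0000-0000-0000-000000000000"),
})
if err != nil {
    // handle error
}
```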
+ +|Host Environment| | | +|---|---|---| +|Azure Virtual Machines and Scale Sets|[Configuration](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)| +|Azure App Service and Azure Functions|[Configuration](https://docs.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)| +|Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)| +|Azure Arc|[Configuration](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)|| +|Azure Service Fabric|[Configuration](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)|| + +### Azure Virtual Machine managed identity + +| Error Message |Description| Mitigation | +|---|---|---| +|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.

    If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).| +|The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error, 502 or 504 status code.|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`| +|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|

    • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
    • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
    | +|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|
    • Refer to the error message for more details on specific failures.
    • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
    • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
    | + +#### Verify IMDS is available on the VM + +If you have access to the VM, you can use `curl` to verify the managed identity endpoint is available. + +```sh +curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://management.core.windows.net&api-version=2018-02-01' -H "Metadata: true" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure App Service and Azure Functions managed identity + +| Error Message |Description| Mitigation | +|---|---|---| +|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|
    • Ensure the App Service is configured for managed identity as described in [App Service documentation](https://docs.microsoft.com/azure/app-service/overview-managed-identity).
    • Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.
    | + +#### Verify the App Service managed identity endpoint is available + +If you can SSH into the App Service, you can verify managed identity is available in the environment. First ensure the environment variables `IDENTITY_ENDPOINT` and `IDENTITY_SECRET` are set. Then you can verify the managed identity endpoint is available using `curl`. + +```sh +curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-version=2019-08-01" -H "X-IDENTITY-HEADER: $IDENTITY_HEADER" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure Kubernetes Service managed identity + +#### Pod Identity + +| Error Message |Description| Mitigation | +|---|---|---| +|"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response). + + +## Troubleshoot AzureCliCredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|
    • Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://docs.microsoft.com/cli/azure/install-azure-cli).
    • Validate the installation location is in the application's `PATH` environment variable.
    | +|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|
    • Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://docs.microsoft.com/cli/azure/authenticate-azure-cli).
    • Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.
    | + +#### Verify the Azure CLI can obtain tokens + +You can manually verify that the Azure CLI can authenticate and obtain tokens. First, use the `account` command to verify the logged in account. + +```azurecli +az account show +``` + +Once you've verified the Azure CLI is using the correct account, you can validate that it's able to obtain tokens for that account. + +```azurecli +az account get-access-token --output json --resource https://management.core.windows.net +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + + +## Troubleshoot `WorkloadIdentityCredential` authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.
    • If your application runs on Azure Kubernetes Service (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
    • If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions` + +## Get additional help + +Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json new file mode 100644 index 00000000000..47e77f88e3f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/azidentity", + "Tag": "go/azidentity_6225ab0470" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go new file mode 100644 index 00000000000..739ff49c1ec --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -0,0 +1,190 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "net/url" + "os" + "regexp" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const ( + azureAdditionallyAllowedTenants = "AZURE_ADDITIONALLY_ALLOWED_TENANTS" + azureAuthorityHost = "AZURE_AUTHORITY_HOST" + azureClientCertificatePassword = "AZURE_CLIENT_CERTIFICATE_PASSWORD" + azureClientCertificatePath = "AZURE_CLIENT_CERTIFICATE_PATH" + azureClientID = "AZURE_CLIENT_ID" + azureClientSecret = "AZURE_CLIENT_SECRET" + azureFederatedTokenFile = "AZURE_FEDERATED_TOKEN_FILE" + azurePassword = "AZURE_PASSWORD" + azureRegionalAuthorityName = "AZURE_REGIONAL_AUTHORITY_NAME" + azureTenantID = "AZURE_TENANT_ID" + azureUsername = "AZURE_USERNAME" + + organizationsTenantID = "organizations" + developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" + defaultSuffix = "/.default" + tenantIDValidationErr = "invalid tenantID. 
You can locate your tenantID by following the instructions listed here: https://docs.microsoft.com/partner-center/find-ids-and-domain-names" +) + +var ( + // capability CP1 indicates the client application is capable of handling CAE claims challenges + cp1 = []string{"CP1"} + // CP1 is disabled until CAE support is added back + disableCP1 = true +) + +var getConfidentialClient = func(clientID, tenantID string, cred confidential.Credential, co *azcore.ClientOptions, additionalOpts ...confidential.Option) (confidentialClient, error) { + if !validTenantID(tenantID) { + return confidential.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return confidential.Client{}, err + } + authority := runtime.JoinPaths(authorityHost, tenantID) + o := []confidential.Option{ + confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), + confidential.WithHTTPClient(newPipelineAdapter(co)), + } + if !disableCP1 { + o = append(o, confidential.WithClientCapabilities(cp1)) + } + o = append(o, additionalOpts...) + if strings.ToLower(tenantID) == "adfs" { + o = append(o, confidential.WithInstanceDiscovery(false)) + } + return confidential.New(authority, clientID, cred, o...) +} + +var getPublicClient = func(clientID, tenantID string, co *azcore.ClientOptions, additionalOpts ...public.Option) (public.Client, error) { + if !validTenantID(tenantID) { + return public.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return public.Client{}, err + } + o := []public.Option{ + public.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), + public.WithHTTPClient(newPipelineAdapter(co)), + } + if !disableCP1 { + o = append(o, public.WithClientCapabilities(cp1)) + } + o = append(o, additionalOpts...) + if strings.ToLower(tenantID) == "adfs" { + o = append(o, public.WithInstanceDiscovery(false)) + } + return public.New(clientID, o...) +} + +// setAuthorityHost initializes the authority host for credentials. Precedence is: +// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user +// 2. value of AZURE_AUTHORITY_HOST +// 3. 
default: Azure Public Cloud +func setAuthorityHost(cc cloud.Configuration) (string, error) { + host := cc.ActiveDirectoryAuthorityHost + if host == "" { + if len(cc.Services) > 0 { + return "", errors.New("missing ActiveDirectoryAuthorityHost for specified cloud") + } + host = cloud.AzurePublic.ActiveDirectoryAuthorityHost + if envAuthorityHost := os.Getenv(azureAuthorityHost); envAuthorityHost != "" { + host = envAuthorityHost + } + } + u, err := url.Parse(host) + if err != nil { + return "", err + } + if u.Scheme != "https" { + return "", errors.New("cannot use an authority host without https") + } + return host, nil +} + +// validTenantID return true is it receives a valid tenantID, returns false otherwise +func validTenantID(tenantID string) bool { + match, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID) + if err != nil { + return false + } + return match +} + +func newPipelineAdapter(opts *azcore.ClientOptions) pipelineAdapter { + pl := runtime.NewPipeline(component, version, runtime.PipelineOptions{}, opts) + return pipelineAdapter{pl: pl} +} + +type pipelineAdapter struct { + pl runtime.Pipeline +} + +func (p pipelineAdapter) CloseIdleConnections() { + // do nothing +} + +func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { + req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String()) + if err != nil { + return nil, err + } + if r.Body != nil && r.Body != http.NoBody { + // create a rewindable body from the existing body as required + var body io.ReadSeekCloser + if rsc, ok := r.Body.(io.ReadSeekCloser); ok { + body = rsc + } else { + b, err := io.ReadAll(r.Body) + if err != nil { + return nil, err + } + body = streaming.NopCloser(bytes.NewReader(b)) + } + err = req.SetBody(body, r.Header.Get("Content-Type")) + if err != nil { + return nil, err + } + } + resp, err := p.pl.Do(req) + if err != nil { + return nil, err + } + return resp, err +} + +// enables fakes for test scenarios +type confidentialClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...confidential.AcquireSilentOption) (confidential.AuthResult, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...confidential.AcquireByAuthCodeOption) (confidential.AuthResult, error) + AcquireTokenByCredential(ctx context.Context, scopes []string, options ...confidential.AcquireByCredentialOption) (confidential.AuthResult, error) + AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, options ...confidential.AcquireOnBehalfOfOption) (confidential.AuthResult, error) +} + +// enables fakes for test scenarios +type publicClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error) + AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string, options ...public.AcquireByUsernamePasswordOption) (public.AuthResult, error) + AcquireTokenByDeviceCode(ctx context.Context, scopes []string, options ...public.AcquireByDeviceCodeOption) (public.DeviceCode, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...public.AcquireByAuthCodeOption) (public.AuthResult, error) + AcquireTokenInteractive(ctx context.Context, scopes []string, options ...public.AcquireInteractiveOption) (public.AuthResult, error) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go new file mode 100644 index 00000000000..33ff13c09db --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -0,0 +1,180 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "regexp" + "runtime" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +const ( + credNameAzureCLI = "AzureCLICredential" + timeoutCLIRequest = 10 * time.Second +) + +// used by tests to fake invoking the CLI +type azureCLITokenProvider func(ctx context.Context, resource string, tenantID string) ([]byte, error) + +// AzureCLICredentialOptions contains optional parameters for AzureCLICredential. +type AzureCLICredentialOptions struct { + // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition + // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the + // logged in account can access. + AdditionallyAllowedTenants []string + // TenantID identifies the tenant the credential should authenticate in. + // Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user. + TenantID string + + tokenProvider azureCLITokenProvider +} + +// init returns an instance of AzureCLICredentialOptions initialized with default values. +func (o *AzureCLICredentialOptions) init() { + if o.tokenProvider == nil { + o.tokenProvider = defaultTokenProvider() + } +} + +// AzureCLICredential authenticates as the identity logged in to the Azure CLI. +type AzureCLICredential struct { + s *syncer + tokenProvider azureCLITokenProvider +} + +// NewAzureCLICredential constructs an AzureCLICredential. Pass nil to accept default options. +func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredential, error) { + cp := AzureCLICredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c := AzureCLICredential{tokenProvider: cp.tokenProvider} + c.s = newSyncer(credNameAzureCLI, cp.TenantID, cp.AdditionallyAllowedTenants, c.requestToken, c.requestToken) + return &c, nil +} + +// GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI. +// This method is called automatically by Azure SDK clients. 
+func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) != 1 { + return azcore.AccessToken{}, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + } + // CLI expects an AAD v1 resource, not a v2 scope + opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} + return c.s.GetToken(ctx, opts) +} + +func (c *AzureCLICredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + b, err := c.tokenProvider(ctx, opts.Scopes[0], opts.TenantID) + if err != nil { + return azcore.AccessToken{}, err + } + at, err := c.createAccessToken(b) + if err != nil { + return azcore.AccessToken{}, err + } + return at, nil +} + +func defaultTokenProvider() func(ctx context.Context, resource string, tenantID string) ([]byte, error) { + return func(ctx context.Context, resource string, tenantID string) ([]byte, error) { + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + if err != nil { + return nil, err + } + if !match { + return nil, fmt.Errorf(`%s: unexpected scope "%s". Only alphanumeric characters and ".", ";", "-", and "/" are allowed`, credNameAzureCLI, resource) + } + + // set a default timeout for this authentication iff the application hasn't done so already + var cancel context.CancelFunc + if _, hasDeadline := ctx.Deadline(); !hasDeadline { + ctx, cancel = context.WithTimeout(ctx, timeoutCLIRequest) + defer cancel() + } + + commandLine := "az account get-access-token -o json --resource " + resource + if tenantID != "" { + commandLine += " --tenant " + tenantID + } + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value") + } + cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) + cliCmd.Dir = dir + } else { + cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) + cliCmd.Dir = "/bin" + } + cliCmd.Env = os.Environ() + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") { + msg = "Azure CLI not found on path" + } + if msg == "" { + msg = err.Error() + } + return nil, newCredentialUnavailableError(credNameAzureCLI, msg) + } + + return output, nil + } +} + +func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { + t := struct { + AccessToken string `json:"accessToken"` + Authority string `json:"_authority"` + ClientID string `json:"_clientId"` + ExpiresOn string `json:"expiresOn"` + IdentityProvider string `json:"identityProvider"` + IsMRRT bool `json:"isMRRT"` + RefreshToken string `json:"refreshToken"` + Resource string `json:"resource"` + TokenType string `json:"tokenType"` + UserID string `json:"userId"` + }{} + err := json.Unmarshal(tk, &t) + if err != nil { + return azcore.AccessToken{}, err + } + + // the Azure CLI's "expiresOn" is local time + exp, err := time.ParseInLocation("2006-01-02 15:04:05.999999", t.ExpiresOn, time.Local) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("Error parsing token expiration time %q: %v", t.ExpiresOn, err) + } + + converted := azcore.AccessToken{ + Token: t.AccessToken, + ExpiresOn: exp.UTC(), + } + return converted, nil +} + +var _ 
azcore.TokenCredential = (*AzureCLICredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go new file mode 100644 index 00000000000..dc855edf786 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -0,0 +1,138 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// ChainedTokenCredentialOptions contains optional parameters for ChainedTokenCredential. +type ChainedTokenCredentialOptions struct { + // RetrySources configures how the credential uses its sources. When true, the credential always attempts to + // authenticate through each source in turn, stopping when one succeeds. When false, the credential authenticates + // only through this first successful source--it never again tries the sources which failed. + RetrySources bool +} + +// ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default, +// it tries all the credentials until one authenticates, after which it always uses that credential. +type ChainedTokenCredential struct { + cond *sync.Cond + iterating bool + name string + retrySources bool + sources []azcore.TokenCredential + successfulCredential azcore.TokenCredential +} + +// NewChainedTokenCredential creates a ChainedTokenCredential. Pass nil for options to accept defaults. +func NewChainedTokenCredential(sources []azcore.TokenCredential, options *ChainedTokenCredentialOptions) (*ChainedTokenCredential, error) { + if len(sources) == 0 { + return nil, errors.New("sources must contain at least one TokenCredential") + } + for _, source := range sources { + if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil + return nil, errors.New("sources cannot contain nil") + } + } + cp := make([]azcore.TokenCredential, len(sources)) + copy(cp, sources) + if options == nil { + options = &ChainedTokenCredentialOptions{} + } + return &ChainedTokenCredential{ + cond: sync.NewCond(&sync.Mutex{}), + name: "ChainedTokenCredential", + retrySources: options.RetrySources, + sources: cp, + }, nil +} + +// GetToken calls GetToken on the chained credentials in turn, stopping when one returns a token. +// This method is called automatically by Azure SDK clients. 
+func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if !c.retrySources { + // ensure only one goroutine at a time iterates the sources and perhaps sets c.successfulCredential + c.cond.L.Lock() + for { + if c.successfulCredential != nil { + c.cond.L.Unlock() + return c.successfulCredential.GetToken(ctx, opts) + } + if !c.iterating { + c.iterating = true + // allow other goroutines to wait while this one iterates + c.cond.L.Unlock() + break + } + c.cond.Wait() + } + } + + var ( + err error + errs []error + successfulCredential azcore.TokenCredential + token azcore.AccessToken + unavailableErr *credentialUnavailableError + ) + for _, cred := range c.sources { + token, err = cred.GetToken(ctx, opts) + if err == nil { + log.Writef(EventAuthentication, "%s authenticated with %s", c.name, extractCredentialName(cred)) + successfulCredential = cred + break + } + errs = append(errs, err) + // continue to the next source iff this one returned credentialUnavailableError + if !errors.As(err, &unavailableErr) { + break + } + } + if c.iterating { + c.cond.L.Lock() + // this is nil when all credentials returned an error + c.successfulCredential = successfulCredential + c.iterating = false + c.cond.L.Unlock() + c.cond.Broadcast() + } + // err is the error returned by the last GetToken call. It will be nil when that call succeeds + if err != nil { + // return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise + msg := createChainedErrorMessage(errs) + if errors.As(err, &unavailableErr) { + err = newCredentialUnavailableError(c.name, msg) + } else { + res := getResponseFromError(err) + err = newAuthenticationFailedError(c.name, msg, res, err) + } + } + return token, err +} + +func createChainedErrorMessage(errs []error) string { + msg := "failed to acquire a token.\nAttempted credentials:" + for _, err := range errs { + msg += fmt.Sprintf("\n\t%s", err.Error()) + } + return msg +} + +func extractCredentialName(credential azcore.TokenCredential) string { + return strings.TrimPrefix(fmt.Sprintf("%T", credential), "*azidentity.") +} + +var _ azcore.TokenCredential = (*ChainedTokenCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml new file mode 100644 index 00000000000..3b443e8eedb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -0,0 +1,47 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. 
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + RunLiveTests: true + ServiceDirectory: 'azidentity' + PreSteps: + - pwsh: | + [System.Convert]::FromBase64String($env:PFX_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/test.pfx -AsByteStream + Set-Content -Path $(Agent.TempDirectory)/test.pem -Value $env:PEM_CONTENTS + [System.Convert]::FromBase64String($env:SNI_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/testsni.pfx -AsByteStream + env: + PFX_CONTENTS: $(net-identity-spcert-pfx) + PEM_CONTENTS: $(net-identity-spcert-pem) + SNI_CONTENTS: $(net-identity-spcert-sni) + EnvVars: + AZURE_IDENTITY_TEST_TENANTID: $(net-identity-tenantid) + AZURE_IDENTITY_TEST_USERNAME: $(net-identity-username) + AZURE_IDENTITY_TEST_PASSWORD: $(net-identity-password) + IDENTITY_SP_TENANT_ID: $(net-identity-sp-tenantid) + IDENTITY_SP_CLIENT_ID: $(net-identity-sp-clientid) + IDENTITY_SP_CLIENT_SECRET: $(net-identity-sp-clientsecret) + IDENTITY_SP_CERT_PEM: $(Agent.TempDirectory)/test.pem + IDENTITY_SP_CERT_PFX: $(Agent.TempDirectory)/test.pfx + IDENTITY_SP_CERT_SNI: $(Agent.TempDirectory)/testsni.pfx diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go new file mode 100644 index 00000000000..d9d22996cd4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -0,0 +1,83 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameAssertion = "ClientAssertionCredential" + +// ClientAssertionCredential authenticates an application with assertions provided by a callback function. +// This credential is for advanced scenarios. [ClientCertificateCredential] has a more convenient API for +// the most common assertion scenario, authenticating a service principal with a certificate. See +// [Azure AD documentation] for details of the assertion format. +// +// [Azure AD documentation]: https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format +type ClientAssertionCredential struct { + client confidentialClient + s *syncer +} + +// ClientAssertionCredentialOptions contains optional parameters for ClientAssertionCredential. +type ClientAssertionCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. 
Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. +func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(context.Context) (string, error), options *ClientAssertionCredentialOptions) (*ClientAssertionCredential, error) { + if getAssertion == nil { + return nil, errors.New("getAssertion must be a function that returns assertions") + } + if options == nil { + options = &ClientAssertionCredentialOptions{} + } + cred := confidential.NewCredFromAssertionCallback( + func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) { + return getAssertion(ctx) + }, + ) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + if err != nil { + return nil, err + } + cac := ClientAssertionCredential{client: c} + cac.s = newSyncer(credNameAssertion, tenantID, options.AdditionallyAllowedTenants, cac.requestToken, cac.silentAuth) + return &cac, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *ClientAssertionCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ClientAssertionCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go new file mode 100644 index 00000000000..804eba899ec --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -0,0 +1,172 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "golang.org/x/crypto/pkcs12" +) + +const credNameCert = "ClientCertificateCredential" + +// ClientCertificateCredentialOptions contains optional parameters for ClientCertificateCredential. +type ClientCertificateCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. 
+ // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c + // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. + // Defaults to False. + SendCertificateChain bool +} + +// ClientCertificateCredential authenticates a service principal with a certificate. +type ClientCertificateCredential struct { + client confidentialClient + s *syncer +} + +// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. +func NewClientCertificateCredential(tenantID string, clientID string, certs []*x509.Certificate, key crypto.PrivateKey, options *ClientCertificateCredentialOptions) (*ClientCertificateCredential, error) { + if len(certs) == 0 { + return nil, errors.New("at least one certificate is required") + } + if options == nil { + options = &ClientCertificateCredentialOptions{} + } + cred, err := confidential.NewCredFromCert(certs, key) + if err != nil { + return nil, err + } + var o []confidential.Option + if options.SendCertificateChain { + o = append(o, confidential.WithX5C()) + } + o = append(o, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, o...) + if err != nil { + return nil, err + } + cc := ClientCertificateCredential{client: c} + cc.s = newSyncer(credNameCert, tenantID, options.AdditionallyAllowedTenants, cc.requestToken, cc.silentAuth) + return &cc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *ClientCertificateCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ClientCertificateCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +// ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential. +// Pass nil for password if the private key isn't encrypted. This function can't decrypt keys in PEM format. 
+func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, crypto.PrivateKey, error) { + var blocks []*pem.Block + var err error + if len(password) == 0 { + blocks, err = loadPEMCert(certData) + } + if len(blocks) == 0 || err != nil { + blocks, err = loadPKCS12Cert(certData, string(password)) + } + if err != nil { + return nil, nil, err + } + var certs []*x509.Certificate + var pk crypto.PrivateKey + for _, block := range blocks { + switch block.Type { + case "CERTIFICATE": + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, err + } + certs = append(certs, c) + case "PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + } + if err != nil { + return nil, nil, err + } + case "RSA PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, err + } + } + } + if len(certs) == 0 { + return nil, nil, errors.New("found no certificate") + } + if pk == nil { + return nil, nil, errors.New("found no private key") + } + return certs, pk, nil +} + +func loadPEMCert(certData []byte) ([]*pem.Block, error) { + blocks := []*pem.Block{} + for { + var block *pem.Block + block, certData = pem.Decode(certData) + if block == nil { + break + } + blocks = append(blocks, block) + } + if len(blocks) == 0 { + return nil, errors.New("didn't find any PEM blocks") + } + return blocks, nil +} + +func loadPKCS12Cert(certData []byte, password string) ([]*pem.Block, error) { + blocks, err := pkcs12.ToPEM(certData, password) + if err != nil { + return nil, err + } + if len(blocks) == 0 { + // not mentioning PKCS12 in this message because we end up here when certData is garbage + return nil, errors.New("didn't find any certificate content") + } + return blocks, err +} + +var _ azcore.TokenCredential = (*ClientCertificateCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go new file mode 100644 index 00000000000..dda21f6b88d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameSecret = "ClientSecretCredential" + +// ClientSecretCredentialOptions contains optional parameters for ClientSecretCredential. +type ClientSecretCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. 
It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// ClientSecretCredential authenticates an application with a client secret. +type ClientSecretCredential struct { + client confidentialClient + s *syncer +} + +// NewClientSecretCredential constructs a ClientSecretCredential. Pass nil for options to accept defaults. +func NewClientSecretCredential(tenantID string, clientID string, clientSecret string, options *ClientSecretCredentialOptions) (*ClientSecretCredential, error) { + if options == nil { + options = &ClientSecretCredentialOptions{} + } + cred, err := confidential.NewCredFromSecret(clientSecret) + if err != nil { + return nil, err + } + c, err := getConfidentialClient( + clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery), + ) + if err != nil { + return nil, err + } + csc := ClientSecretCredential{client: c} + csc.s = newSyncer(credNameSecret, tenantID, options.AdditionallyAllowedTenants, csc.requestToken, csc.silentAuth) + return &csc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *ClientSecretCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ClientSecretCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ClientSecretCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go new file mode 100644 index 00000000000..1e3efdc97a9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -0,0 +1,209 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "os" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential. +// These options may not apply to all credentials in the chain. +type DefaultAzureCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add + // the wildcard value "*" to allow the credential to acquire tokens for any tenant. 
This value can also be + // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // TenantID identifies the tenant the Azure CLI should authenticate in. + // Defaults to the CLI's default tenant, which is typically the home tenant of the user logged in to the CLI. + TenantID string +} + +// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure. +// It combines credentials suitable for deployment with credentials suitable for local development. +// It attempts to authenticate with each of these credential types, in the following order, stopping +// when one provides a token: +// +// - [EnvironmentCredential] +// - [WorkloadIdentityCredential], if environment variable configuration is set by the Azure workload +// identity webhook. Use [WorkloadIdentityCredential] directly when not using the webhook or needing +// more control over its configuration. +// - [ManagedIdentityCredential] +// - [AzureCLICredential] +// +// Consult the documentation for these credential types for more information on how they authenticate. +// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for +// every subsequent authentication. +type DefaultAzureCredential struct { + chain *ChainedTokenCredential +} + +// NewDefaultAzureCredential creates a DefaultAzureCredential. Pass nil for options to accept defaults. 
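// Illustrative sketch (hypothetical caller-side example; the ARM scope and the assumed
// imports of "context", the azcore/policy package, and this package are illustrative
// only): typical use of DefaultAzureCredential. Most programs simply hand the credential
// to an Azure SDK client; GetToken is called directly here only to make the flow visible.
func exampleDefaultAzureCredential(ctx context.Context) error {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		return err
	}
	tk, err := cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		return err
	}
	_ = tk.Token // use the token, or let an SDK client do this for you
	return nil
}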
+func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*DefaultAzureCredential, error) { + var creds []azcore.TokenCredential + var errorMessages []string + + if options == nil { + options = &DefaultAzureCredentialOptions{} + } + additionalTenants := options.AdditionallyAllowedTenants + if len(additionalTenants) == 0 { + if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" { + additionalTenants = strings.Split(tenants, ";") + } + } + + envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{ + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + additionallyAllowedTenants: additionalTenants, + }) + if err == nil { + creds = append(creds, envCred) + } else { + errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err}) + } + + // workload identity requires values for AZURE_AUTHORITY_HOST, AZURE_CLIENT_ID, AZURE_FEDERATED_TOKEN_FILE, AZURE_TENANT_ID + wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + }) + if err == nil { + creds = append(creds, wic) + } else { + errorMessages = append(errorMessages, credNameWorkloadIdentity+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err}) + } + o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions} + if ID, ok := os.LookupEnv(azureClientID); ok { + o.ID = ClientID(ID) + } + miCred, err := NewManagedIdentityCredential(o) + if err == nil { + creds = append(creds, &timeoutWrapper{mic: miCred, timeout: time.Second}) + } else { + errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err}) + } + + cliCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{AdditionallyAllowedTenants: additionalTenants, TenantID: options.TenantID}) + if err == nil { + creds = append(creds, cliCred) + } else { + errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err}) + } + + err = defaultAzureCredentialConstructorErrorHandler(len(creds), errorMessages) + if err != nil { + return nil, err + } + + chain, err := NewChainedTokenCredential(creds, nil) + if err != nil { + return nil, err + } + chain.name = "DefaultAzureCredential" + return &DefaultAzureCredential{chain: chain}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
+func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.chain.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*DefaultAzureCredential)(nil) + +func defaultAzureCredentialConstructorErrorHandler(numberOfSuccessfulCredentials int, errorMessages []string) (err error) { + errorMessage := strings.Join(errorMessages, "\n\t") + + if numberOfSuccessfulCredentials == 0 { + return errors.New(errorMessage) + } + + if len(errorMessages) != 0 { + log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", errorMessage) + } + + return nil +} + +// defaultCredentialErrorReporter is a substitute for credentials that couldn't be constructed. +// Its GetToken method always returns a credentialUnavailableError having the same message as +// the error that prevented constructing the credential. This ensures the message is present +// in the error returned by ChainedTokenCredential.GetToken() +type defaultCredentialErrorReporter struct { + credType string + err error +} + +func (d *defaultCredentialErrorReporter) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if _, ok := d.err.(*credentialUnavailableError); ok { + return azcore.AccessToken{}, d.err + } + return azcore.AccessToken{}, newCredentialUnavailableError(d.credType, d.err.Error()) +} + +var _ azcore.TokenCredential = (*defaultCredentialErrorReporter)(nil) + +// timeoutWrapper prevents a potentially very long timeout when managed identity isn't available +type timeoutWrapper struct { + mic *ManagedIdentityCredential + // timeout applies to all auth attempts until one doesn't time out + timeout time.Duration +} + +// GetToken wraps DefaultAzureCredential's initial managed identity auth attempt with a short timeout +// because managed identity may not be available and connecting to IMDS can take several minutes to time out. +func (w *timeoutWrapper) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var tk azcore.AccessToken + var err error + // no need to synchronize around this value because it's written only within ChainedTokenCredential's critical section + if w.timeout > 0 { + c, cancel := context.WithTimeout(ctx, w.timeout) + defer cancel() + tk, err = w.mic.GetToken(c, opts) + if isAuthFailedDueToContext(err) { + err = newCredentialUnavailableError(credNameManagedIdentity, "managed identity timed out") + } else { + // some managed identity implementation is available, so don't apply the timeout to future calls + w.timeout = 0 + } + } else { + tk, err = w.mic.GetToken(ctx, opts) + } + return tk, err +} + +// unwraps nested AuthenticationFailedErrors to get the root error +func isAuthFailedDueToContext(err error) bool { + for { + var authFailedErr *AuthenticationFailedError + if !errors.As(err, &authFailedErr) { + break + } + err = authFailedErr.err + } + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go new file mode 100644 index 00000000000..108e83c43ae --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -0,0 +1,136 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameDeviceCode = "DeviceCodeCredential" + +// DeviceCodeCredentialOptions contains optional parameters for DeviceCodeCredential. +type DeviceCodeCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire + // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + AdditionallyAllowedTenants []string + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant + // applications. + TenantID string + + // UserPrompt controls how the credential presents authentication instructions. The credential calls + // this function with authentication details when it receives a device code. By default, the credential + // prints these details to stdout. + UserPrompt func(context.Context, DeviceCodeMessage) error +} + +func (o *DeviceCodeCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } + if o.UserPrompt == nil { + o.UserPrompt = func(ctx context.Context, dc DeviceCodeMessage) error { + fmt.Println(dc.Message) + return nil + } + } +} + +// DeviceCodeMessage contains the information a user needs to complete authentication. +type DeviceCodeMessage struct { + // UserCode is the user code returned by the service. + UserCode string `json:"user_code"` + // VerificationURL is the URL at which the user must authenticate. + VerificationURL string `json:"verification_uri"` + // Message is user instruction from Azure Active Directory. + Message string `json:"message"` +} + +// DeviceCodeCredential acquires tokens for a user via the device code flow, which has the +// user browse to an Azure Active Directory URL, enter a code, and authenticate. It's useful +// for authenticating a user in an environment without a web browser, such as an SSH session. +// If a web browser is available, InteractiveBrowserCredential is more convenient because it +// automatically opens a browser to the login page. +type DeviceCodeCredential struct { + account public.Account + client publicClient + s *syncer + prompt func(context.Context, DeviceCodeMessage) error +} + +// NewDeviceCodeCredential creates a DeviceCodeCredential. Pass nil to accept default options. 
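// Illustrative sketch (hypothetical caller-side example; the tenant ID and the assumed
// imports of "context", "log", and this package are placeholders): overriding UserPrompt
// so device code instructions go to a logger instead of stdout.
func exampleDeviceCodeCredential() (*azidentity.DeviceCodeCredential, error) {
	return azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
		TenantID: "my-tenant-id", // hypothetical; defaults to "organizations" when empty
		UserPrompt: func(ctx context.Context, dc azidentity.DeviceCodeMessage) error {
			log.Printf("to sign in, visit %s and enter the code %s", dc.VerificationURL, dc.UserCode)
			return nil
		},
	})
}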
+func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeCredential, error) { + cp := DeviceCodeCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c, err := getPublicClient( + cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery), + ) + if err != nil { + return nil, err + } + cred := DeviceCodeCredential{client: c, prompt: cp.UserPrompt} + cred.s = newSyncer(credNameDeviceCode, cp.TenantID, cp.AdditionallyAllowedTenants, cred.requestToken, cred.silentAuth) + return &cred, nil +} + +// GetToken requests an access token from Azure Active Directory. It will begin the device code flow and poll until the user completes authentication. +// This method is called automatically by Azure SDK clients. +func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *DeviceCodeCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + dc, err := c.client.AcquireTokenByDeviceCode(ctx, opts.Scopes, public.WithTenantID(opts.TenantID)) + if err != nil { + return azcore.AccessToken{}, err + } + err = c.prompt(ctx, DeviceCodeMessage{ + Message: dc.Result.Message, + UserCode: dc.Result.UserCode, + VerificationURL: dc.Result.VerificationURL, + }) + if err != nil { + return azcore.AccessToken{}, err + } + ar, err := dc.AuthenticationResult(ctx) + if err != nil { + return azcore.AccessToken{}, err + } + c.account = ar.Account + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *DeviceCodeCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, + public.WithSilentAccount(c.account), + public.WithTenantID(opts.TenantID), + ) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go new file mode 100644 index 00000000000..7ecd928e024 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -0,0 +1,164 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN" + +// EnvironmentCredentialOptions contains optional parameters for EnvironmentCredential +type EnvironmentCredentialOptions struct { + azcore.ClientOptions + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. 
+ DisableInstanceDiscovery bool + // additionallyAllowedTenants is used only by NewDefaultAzureCredential() to enable that constructor's explicit + // option to override the value of AZURE_ADDITIONALLY_ALLOWED_TENANTS. Applications using EnvironmentCredential + // directly should set that variable instead. This field should remain unexported to preserve this credential's + // unambiguous "all configuration from environment variables" design. + additionallyAllowedTenants []string +} + +// EnvironmentCredential authenticates a service principal with a secret or certificate, or a user with a password, depending +// on environment variable configuration. It reads configuration from these variables, in the following order: +// +// # Service principal with client secret +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. +// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_SECRET: one of the service principal's client secrets +// +// # Service principal with certificate +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. +// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 certificate file including the private key. +// +// AZURE_CLIENT_CERTIFICATE_PASSWORD: (optional) password for the certificate file. +// +// # User with username and password +// +// AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations". +// +// AZURE_CLIENT_ID: client ID of the application the user will authenticate to +// +// AZURE_USERNAME: a username (usually an email address) +// +// AZURE_PASSWORD: the user's password +// +// # Configuration for multitenant applications +// +// To enable multitenant authentication, set AZURE_ADDITIONALLY_ALLOWED_TENANTS with a semicolon delimited list of tenants +// the credential may request tokens from in addition to the tenant specified by AZURE_TENANT_ID. Set +// AZURE_ADDITIONALLY_ALLOWED_TENANTS to "*" to enable the credential to request a token from any tenant. +type EnvironmentCredential struct { + cred azcore.TokenCredential +} + +// NewEnvironmentCredential creates an EnvironmentCredential. Pass nil to accept default options. 
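// Illustrative sketch (hypothetical caller-side example): EnvironmentCredential takes all
// of its configuration from environment variables, so construction itself needs no
// arguments. The variables shown are the client secret variant and would normally be set
// outside the process rather than in code.
//
//	AZURE_TENANT_ID=<tenant>  AZURE_CLIENT_ID=<app>  AZURE_CLIENT_SECRET=<secret>
func exampleEnvironmentCredential() (azcore.TokenCredential, error) {
	// Fails fast with a descriptive error when required variables are missing.
	return azidentity.NewEnvironmentCredential(nil)
}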
+func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*EnvironmentCredential, error) { + if options == nil { + options = &EnvironmentCredentialOptions{} + } + tenantID := os.Getenv(azureTenantID) + if tenantID == "" { + return nil, errors.New("missing environment variable AZURE_TENANT_ID") + } + clientID := os.Getenv(azureClientID) + if clientID == "" { + return nil, errors.New("missing environment variable " + azureClientID) + } + // tenants set by NewDefaultAzureCredential() override the value of AZURE_ADDITIONALLY_ALLOWED_TENANTS + additionalTenants := options.additionallyAllowedTenants + if len(additionalTenants) == 0 { + if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" { + additionalTenants = strings.Split(tenants, ";") + } + } + if clientSecret := os.Getenv(azureClientSecret); clientSecret != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientSecretCredential") + o := &ClientSecretCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewClientSecretCredential(tenantID, clientID, clientSecret, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if certPath := os.Getenv(azureClientCertificatePath); certPath != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientCertificateCredential") + certData, err := os.ReadFile(certPath) + if err != nil { + return nil, fmt.Errorf(`failed to read certificate file "%s": %v`, certPath, err) + } + var password []byte + if v := os.Getenv(azureClientCertificatePassword); v != "" { + password = []byte(v) + } + certs, key, err := ParseCertificates(certData, password) + if err != nil { + return nil, fmt.Errorf(`failed to load certificate from "%s": %v`, certPath, err) + } + o := &ClientCertificateCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + if v, ok := os.LookupEnv(envVarSendCertChain); ok { + o.SendCertificateChain = v == "1" || strings.ToLower(v) == "true" + } + cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if username := os.Getenv(azureUsername); username != "" { + if password := os.Getenv(azurePassword); password != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with UsernamePasswordCredential") + o := &UsernamePasswordCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewUsernamePasswordCredential(tenantID, clientID, username, password, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + return nil, errors.New("no value for AZURE_PASSWORD") + } + return nil, errors.New("incomplete environment variable configuration. Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set") +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
+func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.cred.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*EnvironmentCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go new file mode 100644 index 00000000000..86d8976a4b2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -0,0 +1,129 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" +) + +// getResponseFromError retrieves the response carried by +// an AuthenticationFailedError or MSAL CallErr, if any +func getResponseFromError(err error) *http.Response { + var a *AuthenticationFailedError + var c msal.CallErr + var res *http.Response + if errors.As(err, &c) { + res = c.Resp + } else if errors.As(err, &a) { + res = a.RawResponse + } + return res +} + +// AuthenticationFailedError indicates an authentication request has failed. +type AuthenticationFailedError struct { + // RawResponse is the HTTP response motivating the error, if available. + RawResponse *http.Response + + credType string + message string + err error +} + +func newAuthenticationFailedError(credType string, message string, resp *http.Response, err error) error { + return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp, err: err} +} + +// Error implements the error interface. Note that the message contents are not contractual and can change over time. 
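// Illustrative sketch (hypothetical caller-side example; the Key Vault scope and the
// assumed imports of "context", "errors", and the azcore and azcore/policy packages are
// placeholders): unwrapping an AuthenticationFailedError to inspect the HTTP response
// that caused it.
func exampleInspectAuthFailure(ctx context.Context, cred azcore.TokenCredential) {
	_, err := cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes: []string{"https://vault.azure.net/.default"},
	})
	var authErr *azidentity.AuthenticationFailedError
	if errors.As(err, &authErr) && authErr.RawResponse != nil {
		_ = authErr.RawResponse.StatusCode // e.g. log the status and the troubleshooting link
	}
}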
+func (e *AuthenticationFailedError) Error() string { + if e.RawResponse == nil { + return e.credType + ": " + e.message + } + msg := &bytes.Buffer{} + fmt.Fprintf(msg, e.credType+" authentication failed\n") + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := io.ReadAll(e.RawResponse.Body) + e.RawResponse.Body.Close() + if err != nil { + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + e.RawResponse.Body = io.NopCloser(bytes.NewReader(body)) + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + } else { + fmt.Fprint(msg, "Response contained no body") + } + fmt.Fprintln(msg, "\n--------------------------------------------------------------------------------") + var anchor string + switch e.credType { + case credNameAzureCLI: + anchor = "azure-cli" + case credNameCert: + anchor = "client-cert" + case credNameSecret: + anchor = "client-secret" + case credNameManagedIdentity: + anchor = "managed-id" + case credNameUserPassword: + anchor = "username-password" + case credNameWorkloadIdentity: + anchor = "workload" + } + if anchor != "" { + fmt.Fprintf(msg, "To troubleshoot, visit https://aka.ms/azsdk/go/identity/troubleshoot#%s", anchor) + } + return msg.String() +} + +// NonRetriable indicates the request which provoked this error shouldn't be retried. +func (*AuthenticationFailedError) NonRetriable() { + // marker method +} + +var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil) + +// credentialUnavailableError indicates a credential can't attempt authentication because it lacks required +// data or state +type credentialUnavailableError struct { + message string +} + +// newCredentialUnavailableError is an internal helper that ensures consistent error message formatting +func newCredentialUnavailableError(credType, message string) error { + msg := fmt.Sprintf("%s: %s", credType, message) + return &credentialUnavailableError{msg} +} + +// NewCredentialUnavailableError constructs an error indicating a credential can't attempt authentication +// because it lacks required data or state. When [ChainedTokenCredential] receives this error it will try +// its next credential, if any. +func NewCredentialUnavailableError(message string) error { + return &credentialUnavailableError{message} +} + +// Error implements the error interface. Note that the message contents are not contractual and can change over time. +func (e *credentialUnavailableError) Error() string { + return e.message +} + +// NonRetriable is a marker method indicating this error should not be retried. It has no implementation. 
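// Illustrative sketch (hypothetical example; the credential type, its environment
// variable, and the assumed imports of "context", "os", "time", and the azcore packages
// are inventions for illustration): a custom credential that returns a credential
// unavailable error so a ChainedTokenCredential skips it and tries its next entry.
type exampleStaticTokenCredential struct{}

func (exampleStaticTokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
	raw := os.Getenv("EXAMPLE_STATIC_TOKEN") // hypothetical source of a pre-issued token
	if raw == "" {
		// ChainedTokenCredential treats this error type as "try the next credential".
		return azcore.AccessToken{}, azidentity.NewCredentialUnavailableError("EXAMPLE_STATIC_TOKEN is not set")
	}
	return azcore.AccessToken{Token: raw, ExpiresOn: time.Now().Add(10 * time.Minute)}, nil
}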
+func (e *credentialUnavailableError) NonRetriable() {} + +var _ errorinfo.NonRetriable = (*credentialUnavailableError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go new file mode 100644 index 00000000000..4868d22c3e1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -0,0 +1,106 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameBrowser = "InteractiveBrowserCredential" + +// InteractiveBrowserCredentialOptions contains optional parameters for InteractiveBrowserCredential. +type InteractiveBrowserCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire + // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + AdditionallyAllowedTenants []string + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + + // LoginHint pre-populates the account prompt with a username. Users may choose to authenticate a different account. + LoginHint string + // RedirectURL is the URL Azure Active Directory will redirect to with the access token. This is required + // only when setting ClientID, and must match a redirect URI in the application's registration. + // Applications which have registered "http://localhost" as a redirect URI need not set this option. + RedirectURL string + + // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. + TenantID string +} + +func (o *InteractiveBrowserCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } +} + +// InteractiveBrowserCredential opens a browser to interactively authenticate a user. +type InteractiveBrowserCredential struct { + account public.Account + client publicClient + options InteractiveBrowserCredentialOptions + s *syncer +} + +// NewInteractiveBrowserCredential constructs a new InteractiveBrowserCredential. Pass nil to accept default options. 
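// Illustrative sketch (hypothetical caller-side example; the client ID, tenant, and
// redirect URL are placeholders): pinning InteractiveBrowserCredential to a specific app
// registration. Passing nil instead selects the Azure development defaults.
func exampleInteractiveBrowserCredential() (*azidentity.InteractiveBrowserCredential, error) {
	return azidentity.NewInteractiveBrowserCredential(&azidentity.InteractiveBrowserCredentialOptions{
		ClientID:    "00000000-0000-0000-0000-000000000000", // hypothetical app registration
		TenantID:    "my-tenant-id",
		RedirectURL: "http://localhost:8080", // must match a redirect URI on the registration
	})
}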
+func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOptions) (*InteractiveBrowserCredential, error) { + cp := InteractiveBrowserCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery)) + if err != nil { + return nil, err + } + ibc := InteractiveBrowserCredential{client: c, options: cp} + ibc.s = newSyncer(credNameBrowser, cp.TenantID, cp.AdditionallyAllowedTenants, ibc.requestToken, ibc.silentAuth) + return &ibc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *InteractiveBrowserCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenInteractive(ctx, opts.Scopes, + public.WithLoginHint(c.options.LoginHint), + public.WithRedirectURI(c.options.RedirectURL), + public.WithTenantID(opts.TenantID), + ) + if err == nil { + c.account = ar.Account + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *InteractiveBrowserCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, + public.WithSilentAccount(c.account), + public.WithTenantID(opts.TenantID), + ) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go new file mode 100644 index 00000000000..1aa1e0fc7c8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go @@ -0,0 +1,14 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + +// EventAuthentication entries contain information about authentication. +// This includes information like the names of environment variables +// used when obtaining credentials and the type of credential used. +const EventAuthentication log.Event = "Authentication" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go new file mode 100644 index 00000000000..d7b4a32a544 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -0,0 +1,388 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const ( + arcIMDSEndpoint = "IMDS_ENDPOINT" + identityEndpoint = "IDENTITY_ENDPOINT" + identityHeader = "IDENTITY_HEADER" + identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT" + headerMetadata = "Metadata" + imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + msiEndpoint = "MSI_ENDPOINT" + imdsAPIVersion = "2018-02-01" + azureArcAPIVersion = "2019-08-15" + serviceFabricAPIVersion = "2019-07-01-preview" + + qpClientID = "client_id" + qpResID = "mi_res_id" +) + +type msiType int + +const ( + msiTypeAppService msiType = iota + msiTypeAzureArc + msiTypeCloudShell + msiTypeIMDS + msiTypeServiceFabric +) + +// managedIdentityClient provides the base for authenticating in managed identity environments +// This type includes an runtime.Pipeline and TokenCredentialOptions. +type managedIdentityClient struct { + pipeline runtime.Pipeline + msiType msiType + endpoint string + id ManagedIDKind +} + +type wrappedNumber json.Number + +func (n *wrappedNumber) UnmarshalJSON(b []byte) error { + c := string(b) + if c == "\"\"" { + return nil + } + return json.Unmarshal(b, (*json.Number)(n)) +} + +// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS +func setIMDSRetryOptionDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = 5 + } + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 1 * time.Minute + } + if o.RetryDelay == 0 { + o.RetryDelay = 2 * time.Second + } + if o.StatusCodes == nil { + o.StatusCodes = []int{ + // IMDS docs recommend retrying 404, 429 and all 5xx + // https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling + http.StatusNotFound, // 404 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusNotImplemented, // 501 + http.StatusBadGateway, // 502 + http.StatusGatewayTimeout, // 504 + http.StatusHTTPVersionNotSupported, // 505 + http.StatusVariantAlsoNegotiates, // 506 + http.StatusInsufficientStorage, // 507 + http.StatusLoopDetected, // 508 + http.StatusNotExtended, // 510 + http.StatusNetworkAuthenticationRequired, // 511 + } + } + if o.TryTimeout == 0 { + o.TryTimeout = 1 * time.Minute + } +} + +// newManagedIdentityClient creates a new instance of the ManagedIdentityClient with the ManagedIdentityCredentialOptions +// that are passed into it along with a default pipeline. 
+// options: ManagedIdentityCredentialOptions configure policies for the pipeline and the authority host that +// will be used to retrieve tokens and authenticate +func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*managedIdentityClient, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + cp := options.ClientOptions + c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS} + env := "IMDS" + if endpoint, ok := os.LookupEnv(identityEndpoint); ok { + if _, ok := os.LookupEnv(identityHeader); ok { + if _, ok := os.LookupEnv(identityServerThumbprint); ok { + env = "Service Fabric" + c.endpoint = endpoint + c.msiType = msiTypeServiceFabric + } else { + env = "App Service" + c.endpoint = endpoint + c.msiType = msiTypeAppService + } + } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok { + env = "Azure Arc" + c.endpoint = endpoint + c.msiType = msiTypeAzureArc + } + } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { + env = "Cloud Shell" + c.endpoint = endpoint + c.msiType = msiTypeCloudShell + } else { + setIMDSRetryOptionDefaults(&cp.Retry) + } + c.pipeline = runtime.NewPipeline(component, version, runtime.PipelineOptions{}, &cp) + + if log.Should(EventAuthentication) { + log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env) + } + + return &c, nil +} + +// provideToken acquires a token for MSAL's confidential.Client, which caches the token +func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) { + result := confidential.TokenProviderResult{} + tk, err := c.authenticate(ctx, c.id, params.Scopes) + if err == nil { + result.AccessToken = tk.Token + result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds()) + } + return result, err +} + +// authenticate acquires an access token +func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { + msg, err := c.createAuthRequest(ctx, id, scopes) + if err != nil { + return azcore.AccessToken{}, err + } + + resp, err := c.pipeline.Do(msg) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) + } + + if runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return c.createAccessToken(resp) + } + + if c.msiType == msiTypeIMDS && resp.StatusCode == 400 { + if id != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) + } + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "no default identity is assigned to this resource") + } + + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil) +} + +func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) { + value := struct { + // these are the only fields that we use + Token string `json:"access_token,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` + ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid + ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string + }{} + if err := runtime.UnmarshalAsJSON(res, 
&value); err != nil { + return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err) + } + if value.ExpiresIn != "" { + expiresIn, err := json.Number(value.ExpiresIn).Int64() + if err != nil { + return azcore.AccessToken{}, err + } + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil + } + switch v := value.ExpiresOn.(type) { + case float64: + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil + case string: + if expiresOn, err := strconv.Atoi(v); err == nil { + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil + } + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res, nil) + default: + msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res, nil) + } +} + +func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + switch c.msiType { + case msiTypeIMDS: + return c.createIMDSAuthRequest(ctx, id, scopes) + case msiTypeAppService: + return c.createAppServiceAuthRequest(ctx, id, scopes) + case msiTypeAzureArc: + // need to perform preliminary request to retreive the secret key challenge provided by the HIMDS service + key, err := c.getAzureArcSecretKey(ctx, scopes) + if err != nil { + msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err) + return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err) + } + return c.createAzureArcAuthRequest(ctx, id, scopes, key) + case msiTypeServiceFabric: + return c.createServiceFabricAuthRequest(ctx, id, scopes) + case msiTypeCloudShell: + return c.createCloudShellAuthRequest(ctx, id, scopes) + default: + return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment") + } +} + +func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + q := request.Raw().URL.Query() + q.Add("api-version", imdsAPIVersion) + q.Add("resource", strings.Join(scopes, " ")) + if id != nil { + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader)) + q := request.Raw().URL.Query() + q.Add("api-version", "2019-08-01") + q.Add("resource", scopes[0]) + if id != nil { + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) 
+ if err != nil { + return nil, err + } + q := request.Raw().URL.Query() + request.Raw().Header.Set("Accept", "application/json") + request.Raw().Header.Set("Secret", os.Getenv(identityHeader)) + q.Add("api-version", serviceFabricAPIVersion) + q.Add("resource", strings.Join(scopes, " ")) + if id != nil { + log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime") + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) { + // create the request to retreive the secret key challenge provided by the HIMDS service + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return "", err + } + request.Raw().Header.Set(headerMetadata, "true") + q := request.Raw().URL.Query() + q.Add("api-version", azureArcAPIVersion) + q.Add("resource", strings.Join(resources, " ")) + request.Raw().URL.RawQuery = q.Encode() + // send the initial request to get the short-lived secret key + response, err := c.pipeline.Do(request) + if err != nil { + return "", err + } + // the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location + // of the secret key file. Any other status code indicates an error in the request. + if response.StatusCode != 401 { + msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode) + return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response, nil) + } + header := response.Header.Get("WWW-Authenticate") + if len(header) == 0 { + return "", errors.New("did not receive a value from WWW-Authenticate header") + } + // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key + pos := strings.LastIndex(header, "=") + if pos == -1 { + return "", fmt.Errorf("did not receive a correct value from WWW-Authenticate header: %s", header) + } + key, err := os.ReadFile(header[pos+1:]) + if err != nil { + return "", fmt.Errorf("could not read file (%s) contents: %v", header[pos+1:], err) + } + return string(key), nil +} + +func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key)) + q := request.Raw().URL.Query() + q.Add("api-version", azureArcAPIVersion) + q.Add("resource", strings.Join(resources, " ")) + if id != nil { + log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities") + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodPost, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + data := url.Values{} + data.Set("resource", strings.Join(scopes, " ")) + dataEncoded := data.Encode() + body := 
streaming.NopCloser(strings.NewReader(dataEncoded)) + if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil { + return nil, err + } + if id != nil { + log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities") + q := request.Raw().URL.Query() + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + return request, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go new file mode 100644 index 00000000000..c6710ae52ae --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -0,0 +1,127 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameManagedIdentity = "ManagedIdentityCredential" + +type managedIdentityIDKind int + +const ( + miClientID managedIdentityIDKind = 0 + miResourceID managedIdentityIDKind = 1 +) + +// ManagedIDKind identifies the ID of a managed identity as either a client or resource ID +type ManagedIDKind interface { + fmt.Stringer + idKind() managedIdentityIDKind +} + +// ClientID is the client ID of a user-assigned managed identity. +type ClientID string + +func (ClientID) idKind() managedIdentityIDKind { + return miClientID +} + +// String returns the string value of the ID. +func (c ClientID) String() string { + return string(c) +} + +// ResourceID is the resource ID of a user-assigned managed identity. +type ResourceID string + +func (ResourceID) idKind() managedIdentityIDKind { + return miResourceID +} + +// String returns the string value of the ID. +func (r ResourceID) String() string { + return string(r) +} + +// ManagedIdentityCredentialOptions contains optional parameters for ManagedIdentityCredential. +type ManagedIdentityCredentialOptions struct { + azcore.ClientOptions + + // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity + // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that + // some platforms don't accept resource IDs. + ID ManagedIDKind +} + +// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. +// This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a +// user-assigned identity. See Azure Active Directory documentation for more information about managed identities: +// https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +type ManagedIdentityCredential struct { + client confidentialClient + mic *managedIdentityClient + s *syncer +} + +// NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options. 
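// Illustrative sketch (hypothetical caller-side example; the client ID value is a
// placeholder): selecting a user-assigned managed identity. ResourceID works the same
// way on platforms that accept resource IDs; omit ID entirely to use the system-assigned
// identity.
func exampleManagedIdentityCredential() (*azidentity.ManagedIdentityCredential, error) {
	return azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
		ID: azidentity.ClientID("00000000-0000-0000-0000-000000000000"), // hypothetical identity
	})
}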
+func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*ManagedIdentityCredential, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + mic, err := newManagedIdentityClient(options) + if err != nil { + return nil, err + } + cred := confidential.NewCredFromTokenProvider(mic.provideToken) + + // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key. + // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL. + clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY" + if options.ID != nil { + clientID = options.ID.String() + } + // similarly, it's okay to give MSAL an incorrect authority URL because that URL won't be used + c, err := confidential.New("https://login.microsoftonline.com/common", clientID, cred) + if err != nil { + return nil, err + } + m := ManagedIdentityCredential{client: c, mic: mic} + m.s = newSyncer(credNameManagedIdentity, "", nil, m.requestToken, m.silentAuth) + return &m, nil +} + +// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. +func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) != 1 { + err := errors.New(credNameManagedIdentity + ": GetToken() requires exactly one scope") + return azcore.AccessToken{}, err + } + // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here + opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} + return c.s.GetToken(ctx, opts) +} + +func (c *ManagedIdentityCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ManagedIdentityCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go new file mode 100644 index 00000000000..3e173f47d26 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go @@ -0,0 +1,99 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "crypto" + "crypto/x509" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameOBO = "OnBehalfOfCredential" + +// OnBehalfOfCredential authenticates a service principal via the on-behalf-of flow. This is typically used by +// middle-tier services that authorize requests to other services with a delegated user identity. Because this +// is not an interactive authentication flow, an application using it must have admin consent for any delegated +// permissions before requesting tokens for them. 
See [Azure Active Directory documentation] for more details. +// +// [Azure Active Directory documentation]: https://docs.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow +type OnBehalfOfCredential struct { + assertion string + client confidentialClient + s *syncer +} + +// OnBehalfOfCredentialOptions contains optional parameters for OnBehalfOfCredential +type OnBehalfOfCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // SendCertificateChain applies only when the credential is configured to authenticate with a certificate. + // This setting controls whether the credential sends the public certificate chain in the x5c header of each + // token request's JWT. This is required for, and only used in, Subject Name/Issuer (SNI) authentication. + SendCertificateChain bool +} + +// NewOnBehalfOfCredentialWithCertificate constructs an OnBehalfOfCredential that authenticates with a certificate. +// See [ParseCertificates] for help loading a certificate. +func NewOnBehalfOfCredentialWithCertificate(tenantID, clientID, userAssertion string, certs []*x509.Certificate, key crypto.PrivateKey, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + cred, err := confidential.NewCredFromCert(certs, key) + if err != nil { + return nil, err + } + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + +// NewOnBehalfOfCredentialWithSecret constructs an OnBehalfOfCredential that authenticates with a client secret. +func NewOnBehalfOfCredentialWithSecret(tenantID, clientID, userAssertion, clientSecret string, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + cred, err := confidential.NewCredFromSecret(clientSecret) + if err != nil { + return nil, err + } + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + +func newOnBehalfOfCredential(tenantID, clientID, userAssertion string, cred confidential.Credential, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + if options == nil { + options = &OnBehalfOfCredentialOptions{} + } + opts := []confidential.Option{} + if options.SendCertificateChain { + opts = append(opts, confidential.WithX5C()) + } + opts = append(opts, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, opts...) + if err != nil { + return nil, err + } + obo := OnBehalfOfCredential{assertion: userAssertion, client: c} + obo.s = newSyncer(credNameOBO, tenantID, options.AdditionallyAllowedTenants, obo.requestToken, obo.requestToken) + return &obo, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
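// Illustrative sketch (hypothetical caller-side example; the downstream scope and the
// assumed imports of "context" and the azcore and azcore/policy packages are
// placeholders): a middle-tier service exchanging an incoming user assertion for a
// downstream token via the on-behalf-of flow.
func exampleOnBehalfOf(ctx context.Context, tenantID, clientID, clientSecret, userAssertion string) (azcore.AccessToken, error) {
	cred, err := azidentity.NewOnBehalfOfCredentialWithSecret(tenantID, clientID, userAssertion, clientSecret, nil)
	if err != nil {
		return azcore.AccessToken{}, err
	}
	return cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes: []string{"https://graph.microsoft.com/.default"}, // hypothetical downstream scope
	})
}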
+func (o *OnBehalfOfCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return o.s.GetToken(ctx, opts) +} + +func (o *OnBehalfOfCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := o.client.AcquireTokenOnBehalfOf(ctx, o.assertion, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*OnBehalfOfCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go new file mode 100644 index 00000000000..ae38555994b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go @@ -0,0 +1,130 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type authFn func(context.Context, policy.TokenRequestOptions) (azcore.AccessToken, error) + +// syncer synchronizes authentication calls so that goroutines can share a credential instance +type syncer struct { + addlTenants []string + authing bool + cond *sync.Cond + reqToken, silent authFn + name, tenant string +} + +func newSyncer(name, tenant string, additionalTenants []string, reqToken, silentAuth authFn) *syncer { + return &syncer{ + addlTenants: resolveAdditionalTenants(additionalTenants), + cond: &sync.Cond{L: &sync.Mutex{}}, + name: name, + reqToken: reqToken, + silent: silentAuth, + tenant: tenant, + } +} + +// GetToken ensures that only one goroutine authenticates at a time +func (s *syncer) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var at azcore.AccessToken + var err error + if len(opts.Scopes) == 0 { + return at, errors.New(s.name + ".GetToken() requires at least one scope") + } + // we don't resolve the tenant for managed identities because they can acquire tokens only from their home tenants + if s.name != credNameManagedIdentity { + tenant, err := s.resolveTenant(opts.TenantID) + if err != nil { + return at, err + } + opts.TenantID = tenant + } + auth := false + s.cond.L.Lock() + defer s.cond.L.Unlock() + for { + at, err = s.silent(ctx, opts) + if err == nil { + // got a token + break + } + if !s.authing { + // this goroutine will request a token + s.authing, auth = true, true + break + } + // another goroutine is acquiring a token; wait for it to finish, then try silent auth again + s.cond.Wait() + } + if auth { + s.authing = false + at, err = s.reqToken(ctx, opts) + s.cond.Broadcast() + } + if err != nil { + // Return credentialUnavailableError directly because that type affects the behavior of credential chains. + // Otherwise, return AuthenticationFailedError. 
+ var unavailableErr *credentialUnavailableError + if !errors.As(err, &unavailableErr) { + res := getResponseFromError(err) + err = newAuthenticationFailedError(s.name, err.Error(), res, err) + } + } else if log.Should(EventAuthentication) { + scope := strings.Join(opts.Scopes, ", ") + msg := fmt.Sprintf(`%s.GetToken() acquired a token for scope "%s"\n`, s.name, scope) + log.Write(EventAuthentication, msg) + } + return at, err +} + +// resolveTenant returns the correct tenant for a token request given the credential's +// configuration, or an error when the specified tenant isn't allowed by that configuration +func (s *syncer) resolveTenant(requested string) (string, error) { + if requested == "" || requested == s.tenant { + return s.tenant, nil + } + if s.tenant == "adfs" { + return "", errors.New("ADFS doesn't support tenants") + } + if !validTenantID(requested) { + return "", errors.New(tenantIDValidationErr) + } + for _, t := range s.addlTenants { + if t == "*" || t == requested { + return requested, nil + } + } + return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, s.name, requested) +} + +// resolveAdditionalTenants returns a copy of tenants, simplified when tenants contains a wildcard +func resolveAdditionalTenants(tenants []string) []string { + if len(tenants) == 0 { + return nil + } + for _, t := range tenants { + // a wildcard makes all other values redundant + if t == "*" { + return []string{"*"} + } + } + cp := make([]string, len(tenants)) + copy(cp, tenants) + return cp +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go new file mode 100644 index 00000000000..8e652e33ff6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -0,0 +1,81 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameUserPassword = "UsernamePasswordCredential" + +// UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential. +type UsernamePasswordCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// UsernamePasswordCredential authenticates a user with a password. 
Microsoft doesn't recommend this kind of authentication, +// because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible +// with any form of multi-factor authentication, and the application must already have user or admin consent. +// This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts. +type UsernamePasswordCredential struct { + account public.Account + client publicClient + password, username string + s *syncer +} + +// NewUsernamePasswordCredential creates a UsernamePasswordCredential. clientID is the ID of the application the user +// will authenticate to. Pass nil for options to accept defaults. +func NewUsernamePasswordCredential(tenantID string, clientID string, username string, password string, options *UsernamePasswordCredentialOptions) (*UsernamePasswordCredential, error) { + if options == nil { + options = &UsernamePasswordCredentialOptions{} + } + c, err := getPublicClient(clientID, tenantID, &options.ClientOptions, public.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + if err != nil { + return nil, err + } + upc := UsernamePasswordCredential{client: c, password: password, username: username} + upc.s = newSyncer(credNameUserPassword, tenantID, options.AdditionallyAllowedTenants, upc.requestToken, upc.silentAuth) + return &upc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *UsernamePasswordCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByUsernamePassword(ctx, opts.Scopes, c.username, c.password, public.WithTenantID(opts.TenantID)) + if err == nil { + c.account = ar.Account + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *UsernamePasswordCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, + public.WithSilentAccount(c.account), + public.WithTenantID(opts.TenantID), + ) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go new file mode 100644 index 00000000000..1a526b2e874 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -0,0 +1,15 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +const ( + // UserAgent is the string to be used in the user agent string when making requests. + component = "azidentity" + + // Version is the semantic version (see http://semver.org) of this module. 
+ version = "v1.3.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go new file mode 100644 index 00000000000..7bfb3436760 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -0,0 +1,126 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "os" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +const credNameWorkloadIdentity = "WorkloadIdentityCredential" + +// WorkloadIdentityCredential supports Azure workload identity on Kubernetes. +// See [Azure Kubernetes Service documentation] for more information. +// +// [Azure Kubernetes Service documentation]: https://learn.microsoft.com/azure/aks/workload-identity-overview +type WorkloadIdentityCredential struct { + assertion, file string + cred *ClientAssertionCredential + expires time.Time + mtx *sync.RWMutex +} + +// WorkloadIdentityCredentialOptions contains optional parameters for WorkloadIdentityCredential. +type WorkloadIdentityCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. + ClientID string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID. + TenantID string + // TokenFilePath is the path a file containing the workload identity token. Defaults to the value of the + // environment variable AZURE_FEDERATED_TOKEN_FILE. + TokenFilePath string +} + +// NewWorkloadIdentityCredential constructs a WorkloadIdentityCredential. Service principal configuration is read +// from environment variables as set by the Azure workload identity webhook. Set options to override those values. +func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) (*WorkloadIdentityCredential, error) { + if options == nil { + options = &WorkloadIdentityCredentialOptions{} + } + ok := false + clientID := options.ClientID + if clientID == "" { + if clientID, ok = os.LookupEnv(azureClientID); !ok { + return nil, errors.New("no client ID specified. Check pod configuration or set ClientID in the options") + } + } + file := options.TokenFilePath + if file == "" { + if file, ok = os.LookupEnv(azureFederatedTokenFile); !ok { + return nil, errors.New("no token file specified. 
Check pod configuration or set TokenFilePath in the options") + } + } + tenantID := options.TenantID + if tenantID == "" { + if tenantID, ok = os.LookupEnv(azureTenantID); !ok { + return nil, errors.New("no tenant ID specified. Check pod configuration or set TenantID in the options") + } + } + w := WorkloadIdentityCredential{file: file, mtx: &sync.RWMutex{}} + caco := ClientAssertionCredentialOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewClientAssertionCredential(tenantID, clientID, w.getAssertion, &caco) + if err != nil { + return nil, err + } + // we want "WorkloadIdentityCredential" in log messages, not "ClientAssertionCredential" + cred.s.name = credNameWorkloadIdentity + w.cred = cred + return &w, nil +} + +// GetToken requests an access token from Azure Active Directory. Azure SDK clients call this method automatically. +func (w *WorkloadIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return w.cred.GetToken(ctx, opts) +} + +// getAssertion returns the specified file's content, which is expected to be a Kubernetes service account token. +// Kubernetes is responsible for updating the file as service account tokens expire. +func (w *WorkloadIdentityCredential) getAssertion(context.Context) (string, error) { + w.mtx.RLock() + if w.expires.Before(time.Now()) { + // ensure only one goroutine at a time updates the assertion + w.mtx.RUnlock() + w.mtx.Lock() + defer w.mtx.Unlock() + // double check because another goroutine may have acquired the write lock first and done the update + if now := time.Now(); w.expires.Before(now) { + content, err := os.ReadFile(w.file) + if err != nil { + return "", err + } + w.assertion = string(content) + // Kubernetes rotates service account tokens when they reach 80% of their total TTL. The shortest TTL + // is 1 hour. That implies the token we just read is valid for at least 12 minutes (20% of 1 hour), + // but we add some margin for safety. + w.expires = now.Add(10 * time.Minute) + } + } else { + defer w.mtx.RUnlock() + } + return w.assertion, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt new file mode 100644 index 00000000000..48ea6616b5b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go new file mode 100644 index 00000000000..245af7d2bec --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go @@ -0,0 +1,51 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag + +import ( + "fmt" + "runtime" + "strings" +) + +// Caller returns the file and line number of a frame on the caller's stack. +// If the funtion fails an empty string is returned. +// skipFrames - the number of frames to skip when determining the caller. +// Passing a value of 0 will return the immediate caller of this function. +func Caller(skipFrames int) string { + if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok { + // the skipFrames + 1 is to skip ourselves + frame := runtime.FuncForPC(pc) + return fmt.Sprintf("%s()\n\t%s:%d", frame.Name(), file, line) + } + return "" +} + +// StackTrace returns a formatted stack trace string. +// If the funtion fails an empty string is returned. +// skipFrames - the number of stack frames to skip before composing the trace string. +// totalFrames - the maximum number of stack frames to include in the trace string. +func StackTrace(skipFrames, totalFrames int) string { + pcCallers := make([]uintptr, totalFrames) + if frames := runtime.Callers(skipFrames, pcCallers); frames == 0 { + return "" + } + frames := runtime.CallersFrames(pcCallers) + sb := strings.Builder{} + for { + frame, more := frames.Next() + sb.WriteString(frame.Function) + sb.WriteString("()\n\t") + sb.WriteString(frame.File) + sb.WriteRune(':') + sb.WriteString(fmt.Sprintf("%d\n", frame.Line)) + if !more { + break + } + } + return sb.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go new file mode 100644 index 00000000000..66bf13e5f04 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go new file mode 100644 index 00000000000..8c6eacb618a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package errorinfo diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go new file mode 100644 index 00000000000..ade7b348e30 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go @@ -0,0 +1,16 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package errorinfo + +// NonRetriable represents a non-transient error. This works in +// conjunction with the retry policy, indicating that the error condition +// is idempotent, so no retries will be attempted. +// Use errors.As() to access this interface in the error chain. +type NonRetriable interface { + error + NonRetriable() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go new file mode 100644 index 00000000000..d4ed6ccc8ad --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go @@ -0,0 +1,124 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "io" + "net/http" +) + +// HasStatusCode returns true if the Response's status code is one of the specified values. +// Exported as runtime.HasStatusCode(). +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +// PayloadOptions contains the optional values for the Payload func. +// NOT exported but used by azcore. +type PayloadOptions struct { + // BytesModifier receives the downloaded byte slice and returns an updated byte slice. + // Use this to modify the downloaded bytes in a payload (e.g. removing a BOM). + BytesModifier func([]byte) []byte +} + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +// Exported as runtime.Payload() WITHOUT the opts parameter. +func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) { + modifyBytes := func(b []byte) []byte { return b } + if opts != nil && opts.BytesModifier != nil { + modifyBytes = opts.BytesModifier + } + + // r.Body won't be a nopClosingBytesReader if downloading was skipped + if buf, ok := resp.Body.(*nopClosingBytesReader); ok { + bytesBody := modifyBytes(buf.Bytes()) + buf.Set(bytesBody) + return bytesBody, nil + } + + bytesBody, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, err + } + + bytesBody = modifyBytes(bytesBody) + resp.Body = &nopClosingBytesReader{s: bytesBody} + return bytesBody, nil +} + +// PayloadDownloaded returns true if the response body has already been downloaded. +// This implies that the Payload() func above has been previously called. +// NOT exported but used by azcore. +func PayloadDownloaded(resp *http.Response) bool { + _, ok := resp.Body.(*nopClosingBytesReader) + return ok +} + +// nopClosingBytesReader is an io.ReadSeekCloser around a byte slice. +// It also provides direct access to the byte slice to avoid rereading. +type nopClosingBytesReader struct { + s []byte + i int64 +} + +// Bytes returns the underlying byte slice. +func (r *nopClosingBytesReader) Bytes() []byte { + return r.s +} + +// Close implements the io.Closer interface. +func (*nopClosingBytesReader) Close() error { + return nil +} + +// Read implements the io.Reader interface. +func (r *nopClosingBytesReader) Read(b []byte) (n int, err error) { + if r.i >= int64(len(r.s)) { + return 0, io.EOF + } + n = copy(b, r.s[r.i:]) + r.i += int64(n) + return +} + +// Set replaces the existing byte slice with the specified byte slice and resets the reader. 
+func (r *nopClosingBytesReader) Set(b []byte) { + r.s = b + r.i = 0 +} + +// Seek implements the io.Seeker interface. +func (r *nopClosingBytesReader) Seek(offset int64, whence int) (int64, error) { + var i int64 + switch whence { + case io.SeekStart: + i = offset + case io.SeekCurrent: + i = r.i + offset + case io.SeekEnd: + i = int64(len(r.s)) + offset + default: + return 0, errors.New("nopClosingBytesReader: invalid whence") + } + if i < 0 { + return 0, errors.New("nopClosingBytesReader: negative position") + } + r.i = i + return i, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go new file mode 100644 index 00000000000..d7876d297ae --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go new file mode 100644 index 00000000000..4f1dcf1b78a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log + +import ( + "fmt" + "os" + "time" +) + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Event is used to group entries. Each group can be toggled on or off. +type Event string + +// SetEvents is used to control which events are written to +// the log. By default all log events are writen. +func SetEvents(cls ...Event) { + log.cls = cls +} + +// SetListener will set the Logger to write to the specified listener. +func SetListener(lst func(Event, string)) { + log.lst = lst +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// END PUBLIC SURFACE AREA +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Should returns true if the specified log event should be written to the log. +// By default all log events will be logged. Call SetEvents() to limit +// the log events for logging. +// If no listener has been set this will return false. +// Calling this method is useful when the message to log is computationally expensive +// and you want to avoid the overhead if its log event is not enabled. +func Should(cls Event) bool { + if log.lst == nil { + return false + } + if log.cls == nil || len(log.cls) == 0 { + return true + } + for _, c := range log.cls { + if c == cls { + return true + } + } + return false +} + +// Write invokes the underlying listener with the specified event and message. +// If the event shouldn't be logged or there is no listener then Write does nothing. +func Write(cls Event, message string) { + if !Should(cls) { + return + } + log.lst(cls, message) +} + +// Writef invokes the underlying listener with the specified event and formatted message. +// If the event shouldn't be logged or there is no listener then Writef does nothing. 
+func Writef(cls Event, format string, a ...interface{}) { + if !Should(cls) { + return + } + log.lst(cls, fmt.Sprintf(format, a...)) +} + +// TestResetEvents is used for TESTING PURPOSES ONLY. +func TestResetEvents() { + log.cls = nil +} + +// logger controls which events to log and writing to the underlying log. +type logger struct { + cls []Event + lst func(Event, string) +} + +// the process-wide logger +var log logger + +func init() { + initLogging() +} + +// split out for testing purposes +func initLogging() { + if cls := os.Getenv("AZURE_SDK_GO_LOGGING"); cls == "all" { + // cls could be enhanced to support a comma-delimited list of log events + log.lst = func(cls Event, msg string) { + // simple console logger, it writes to stderr in the following format: + // [time-stamp] Event: message + fmt.Fprintf(os.Stderr, "[%s] %s: %s\n", time.Now().Format(time.StampMicro), cls, msg) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go new file mode 100644 index 00000000000..db8269627d3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go @@ -0,0 +1,155 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package poller + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// the well-known set of LRO status/provisioning state values. +const ( + StatusSucceeded = "Succeeded" + StatusCanceled = "Canceled" + StatusFailed = "Failed" + StatusInProgress = "InProgress" +) + +// these are non-conformant states that we've seen in the wild. +// we support them for back-compat. +const ( + StatusCancelled = "Cancelled" + StatusCompleted = "Completed" +) + +// IsTerminalState returns true if the LRO's state is terminal. +func IsTerminalState(s string) bool { + return Failed(s) || Succeeded(s) +} + +// Failed returns true if the LRO's state is terminal failure. +func Failed(s string) bool { + return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled) || strings.EqualFold(s, StatusCancelled) +} + +// Succeeded returns true if the LRO's state is terminal success. +func Succeeded(s string) bool { + return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusCompleted) +} + +// returns true if the LRO response contains a valid HTTP status code +func StatusCodeValid(resp *http.Response) bool { + return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent) +} + +// IsValidURL verifies that the URL is valid and absolute. +func IsValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// ErrNoBody is returned if the response didn't contain a body. +var ErrNoBody = errors.New("the response did not contain a body") + +// GetJSON reads the response body into a raw JSON object. +// It returns ErrNoBody if there was no content. 
+func GetJSON(resp *http.Response) (map[string]any, error) { + body, err := exported.Payload(resp, nil) + if err != nil { + return nil, err + } + if len(body) == 0 { + return nil, ErrNoBody + } + // unmarshall the body to get the value + var jsonBody map[string]any + if err = json.Unmarshal(body, &jsonBody); err != nil { + return nil, err + } + return jsonBody, nil +} + +// provisioningState returns the provisioning state from the response or the empty string. +func provisioningState(jsonBody map[string]any) string { + jsonProps, ok := jsonBody["properties"] + if !ok { + return "" + } + props, ok := jsonProps.(map[string]any) + if !ok { + return "" + } + rawPs, ok := props["provisioningState"] + if !ok { + return "" + } + ps, ok := rawPs.(string) + if !ok { + return "" + } + return ps +} + +// status returns the status from the response or the empty string. +func status(jsonBody map[string]any) string { + rawStatus, ok := jsonBody["status"] + if !ok { + return "" + } + status, ok := rawStatus.(string) + if !ok { + return "" + } + return status +} + +// GetStatus returns the LRO's status from the response body. +// Typically used for Azure-AsyncOperation flows. +// If there is no status in the response body the empty string is returned. +func GetStatus(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return status(jsonBody), nil +} + +// GetProvisioningState returns the LRO's state from the response body. +// If there is no state in the response body the empty string is returned. +func GetProvisioningState(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return provisioningState(jsonBody), nil +} + +// GetResourceLocation returns the LRO's resourceLocation value from the response body. +// Typically used for Operation-Location flows. +// If there is no resourceLocation in the response body the empty string is returned. +func GetResourceLocation(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + v, ok := jsonBody["resourceLocation"] + if !ok { + // it might be ok if the field doesn't exist, the caller must make that determination + return "", nil + } + vv, ok := v.(string) + if !ok { + return "", fmt.Errorf("the resourceLocation value %v was not in string format", v) + } + return vv, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go new file mode 100644 index 00000000000..238ef42ed03 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -0,0 +1,123 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package temporal + +import ( + "sync" + "time" +) + +// AcquireResource abstracts a method for refreshing a temporal resource. +type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error) + +// Resource is a temporal resource (usually a credential) that requires periodic refreshing. 
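+// For illustration, a caller might wrap a credential fetch like this (placeholder
+// values; the concrete state and resource types are chosen by the caller):
+//
+//	res := NewResource(func(key string) (string, time.Time, error) {
+//		// fetch a fresh value using key and report when it expires
+//		return "fresh-token", time.Now().Add(time.Hour), nil
+//	})
+//	tok, err := res.Get("my-key") // refreshes only when the cached value is missing or near expiry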
+type Resource[TResource, TState any] struct { + // cond is used to synchronize access to the shared resource embodied by the remaining fields + cond *sync.Cond + + // acquiring indicates that some thread/goroutine is in the process of acquiring/updating the resource + acquiring bool + + // resource contains the value of the shared resource + resource TResource + + // expiration indicates when the shared resource expires; it is 0 if the resource was never acquired + expiration time.Time + + // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource + lastAttempt time.Time + + // acquireResource is the callback function that actually acquires the resource + acquireResource AcquireResource[TResource, TState] +} + +// NewResource creates a new Resource that uses the specified AcquireResource for refreshing. +func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] { + return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar} +} + +// Get returns the underlying resource. +// If the resource is fresh, no refresh is performed. +func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { + // If the resource is expiring within this time window, update it eagerly. + // This allows other threads/goroutines to keep running by using the not-yet-expired + // resource value while one thread/goroutine updates the resource. + const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration + const backoff = 30 * time.Second // Minimum wait time between eager update attempts + + now, acquire, expired := time.Now(), false, false + + // acquire exclusive lock + er.cond.L.Lock() + resource := er.resource + + for { + expired = er.expiration.IsZero() || er.expiration.Before(now) + if expired { + // The resource was never acquired or has expired + if !er.acquiring { + // If another thread/goroutine is not acquiring/updating the resource, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // Getting here means that this thread/goroutine will wait for the updated resource + } else if er.expiration.Add(-window).Before(now) { + // The resource is valid but is expiring within the time window + if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) { + // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted + // to do so within the last 30 seconds, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // This thread/goroutine will use the existing resource value while another updates it + resource = er.resource + break + } else { + // The resource is not close to expiring, this thread/goroutine should use its current value + resource = er.resource + break + } + // If we get here, wait for the new resource value to be acquired/updated + er.cond.Wait() + } + er.cond.L.Unlock() // Release the lock so no threads/goroutines are blocked + + var err error + if acquire { + // This thread/goroutine has been selected to acquire/update the resource + var expiration time.Time + var newValue TResource + er.lastAttempt = now + newValue, expiration, err = er.acquireResource(state) + + // Atomically, update the shared resource's new value & expiration. 
+ er.cond.L.Lock() + if err == nil { + // Update resource & expiration, return the new value + resource = newValue + er.resource, er.expiration = resource, expiration + } else if !expired { + // An eager update failed. Discard the error and return the current--still valid--resource value + err = nil + } + er.acquiring = false // Indicate that no thread/goroutine is currently acquiring the resource + + // Wake up any waiting threads/goroutines since there is a resource they can ALL use + er.cond.L.Unlock() + er.cond.Broadcast() + } + return resource, err // Return the resource this thread/goroutine can use +} + +// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get(). +func (er *Resource[TResource, TState]) Expire() { + er.cond.L.Lock() + defer er.cond.L.Unlock() + + // Reset the expiration as if we never got this resource to begin with + er.expiration = time.Time{} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go new file mode 100644 index 00000000000..a3824bee8b5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go new file mode 100644 index 00000000000..278ac9cd1c2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go @@ -0,0 +1,76 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid + +import ( + "crypto/rand" + "errors" + "fmt" + "strconv" +) + +// The UUID reserved variants. +const ( + reservedRFC4122 byte = 0x40 +) + +// A UUID representation compliant with specification in RFC4122 document. +type UUID [16]byte + +// New returns a new UUID using the RFC4122 algorithm. +func New() (UUID, error) { + u := UUID{} + // Set all bits to pseudo-random values. + // NOTE: this takes a process-wide lock + _, err := rand.Read(u[:]) + if err != nil { + return u, err + } + u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) + + var version byte = 4 + u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) + return u, nil +} + +// String returns the UUID in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format. +func (u UUID) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// Parse parses a string formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +// or "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" into a UUID. 
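+// For example, both of these forms parse successfully (illustrative values):
+//
+//	u, err := Parse("550e8400-e29b-41d4-a716-446655440000")
+//	u, err = Parse("{550e8400-e29b-41d4-a716-446655440000}")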
+func Parse(s string) (UUID, error) { + var uuid UUID + // ensure format + switch len(s) { + case 36: + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 38: + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + s = s[1:37] + default: + return uuid, errors.New("invalid UUID format") + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + // parse chunks + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + b, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return uuid, fmt.Errorf("invalid UUID format: %s", err) + } + uuid[i] = byte(b) + } + return uuid, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/CHANGELOG.md new file mode 100644 index 00000000000..95d3891378a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/CHANGELOG.md @@ -0,0 +1,99 @@ +# Release History + +## 0.10.0 (2023-04-13) + +### Features Added +* Upgraded to api version 7.4 + +### Breaking Changes +* This module is now DEPRECATED. The latest supported version of this module is at github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys +* Renamed `ActionType` to `KeyRotationPolicyAction` + +## 0.9.0 (2022-11-08) + +### Breaking Changes +* `NewClient` returns an `error` + +## 0.8.1 (2022-09-20) + +### Features Added +* Added `ClientOptions.DisableChallengeResourceVerification`. + See https://aka.ms/azsdk/blog/vault-uri for more information. + +## 0.8.0 (2022-09-12) + +### Breaking Changes +* Verify the challenge resource matches the vault domain. + +## 0.7.0 (2022-08-09) + +### Breaking Changes +* Changed type of `NewClient` options parameter to `azkeys.ClientOptions`, which embeds + the former type, `azcore.ClientOptions` + +## 0.6.0 (2022-07-07) + +### Breaking Changes +* The `Client` API now corresponds more directly to the Key Vault REST API. + Most method signatures and types have changed. See the + [module documentation](https://aka.ms/azsdk/go/keyvault-keys/docs) + for updated code examples and more details. + +### Other Changes +* Upgrade to latest `azcore` + +## 0.5.1 (2022-05-12) + +### Other Changes +* Update to latest `azcore` and `internal` modules. + +## 0.5.0 (2022-04-06) + +### Features Added +* Added the Name property on `Key` + +### Breaking Changes +* Requires go 1.18 +* `ListPropertiesOfDeletedKeysPager` has `More() bool` and `NextPage(context.Context) (ListPropertiesOfDeletedKeysPage, error)` for paging over deleted keys. +* `ListPropertiesOfKeyVersionsPager` has `More() bool` and `NextPage(context.Context) (ListPropertiesOfKeyVersionsPage, error)` for paging over deleted keys. +* Removing `RawResponse *http.Response` from `crypto` response types + +## 0.4.0 (2022-03-08) + +### Features Added +* Adds the `ReleasePolicy` parameter to the `UpdateKeyPropertiesOptions` struct. +* Adds the `Immutable` boolean to the `KeyReleasePolicy` model. +* Added a `ToPtr` method on `KeyType` constant + +### Breaking Changes +* Requires go 1.18 +* Changed the `Data` to `EncodedPolicy` on the `KeyReleasePolicy` struct. +* Changed the `Updated`, `Created`, and `Expires` properties to `UpdatedOn`, `CreatedOn`, and `ExpiresOn`. +* Renamed `JSONWebKeyOperation` to `Operation`. 
+* Renamed `JSONWebKeyCurveName` to `CurveName` +* Prefixed all KeyType constants with `KeyType` +* Changed `KeyBundle` to `KeyVaultKey` and `DeletedKeyBundle` to `DeletedKey` +* Renamed `KeyAttributes` to `KeyProperties` +* Renamed `ListKeyVersions` to `ListPropertiesOfKeyVersions` +* Removed `Attributes` struct +* Changed `CreateOCTKey`/`Response`/`Options` to `CreateOctKey`/`Response`/`Options` +* Removed all `RawResponse *http.Response` fields from response structs. + +## 0.3.0 (2022-02-08) + +### Breaking Changes +* Changed the `Tags` properties from `map[string]*string` to `map[string]string` + +### Bugs Fixed +* Fixed a bug in `UpdateKeyProperties` where the `KeyOps` would be deleted if the `UpdateKeyProperties.KeyOps` value was left empty. + +## 0.2.0 (2022-01-12) + +### Bugs Fixed +* Fixes a bug in `crypto.NewClient` where the key version was required in the path, it is no longer required but is recommended. + +### Other Changes +* Updates `azcore` dependency from `v0.20.0` to `v0.21.0` + +## 0.1.0 (2021-11-09) +* This is the initial release of the `azkeys` library diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/LICENSE.txt new file mode 100644 index 00000000000..d1ca00f20a8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/README.md new file mode 100644 index 00000000000..e6acf0dd084 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/README.md @@ -0,0 +1,147 @@ +# Azure Key Vault Keys client module for Go +> Deprecated: use github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys instead + +* Cryptographic key management (this module) - create, store, and control access to the keys used to encrypt your data +* Secrets management ([azsecrets](https://aka.ms/azsdk/go/keyvault-secrets/docs)) - securely store and control access to tokens, passwords, certificates, API keys, and other secrets +* Certificate management ([azcertificates](https://aka.ms/azsdk/go/keyvault-certificates/docs)) - create, manage, and deploy public and private SSL/TLS certificates + +[Source code][key_client_src] | [Package (pkg.go.dev)][goget_azkeys] | [Product documentation][keyvault_docs] | [Samples][keys_samples] + +## Getting started + +### Install packages + +Install `azkeys` and `azidentity` with `go get`: +```Bash +go get github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` +[azidentity][azure_identity] is used for Azure Active Directory authentication as demonstrated below. + +### Prerequisites + +* An [Azure subscription][azure_sub] +* A supported Go version (the Azure SDK supports the two most recent Go releases) +* A key vault. If you need to create one, see the Key Vault documentation for instructions on doing so in the [Azure Portal][azure_keyvault_portal] or with the [Azure CLI][azure_keyvault_cli]. + +### Authentication + +This document demonstrates using [azidentity.NewDefaultAzureCredential][default_cred_ref] to authenticate. This credential type works in both local development and production environments. We recommend using a [managed identity][managed_identity] in production. + +[Client][client_docs] accepts any [azidentity][azure_identity] credential. See the [azidentity][azure_identity] documentation for more information about other credential types. + +#### Create a client + +Constructing the client requires your vault's URL, which you can get from the Azure CLI or the Azure Portal. + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys" +) + +func main() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + // TODO: handle error + } + + client, err := azkeys.NewClient("https://.vault.azure.net", cred, nil) + if err != nil { + // TODO: handle error + } +} +``` + +## Key concepts + +### Keys + +Azure Key Vault can create and store RSA and elliptic curve keys. Both can optionally be protected by hardware security modules (HSMs). Azure Key Vault can also perform cryptographic operations with them. For more information about keys and supported operations and algorithms, see the [Key Vault documentation](https://docs.microsoft.com/azure/key-vault/keys/about-keys). 
+ +[Client][client_docs] can create keys in the vault, get existing keys from the vault, update key metadata, and delete keys, as shown in the examples below. + +## Examples + +Get started with our [examples](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys#pkg-examples). + +## Troubleshooting + +### Error Handling + +All methods which send HTTP requests return `*azcore.ResponseError` when these requests fail. `ResponseError` has error details and the raw response from Key Vault. + +```go +import "github.com/Azure/azure-sdk-for-go/sdk/azcore" + +resp, err := client.GetKey(context.Background(), "keyName", nil) +if err != nil { + var httpErr *azcore.ResponseError + if errors.As(err, &httpErr) { + // TODO: investigate httpErr + } else { + // TODO: not an HTTP error + } +} +``` + +### Logging + +This module uses the logging implementation in `azcore`. To turn on logging for all Azure SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. By default the logger writes to stderr. Use the `azcore/log` package to control log output. For example, logging only HTTP request and response events, and printing them to stdout: + +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// Print log events to stdout +azlog.SetListener(func(cls azlog.Event, msg string) { + fmt.Println(msg) +}) + +// Includes only requests and responses in credential logs +azlog.SetEvents(azlog.EventRequest, azlog.EventResponse) +``` + +### Accessing `http.Response` + +You can access the raw `*http.Response` returned by Key Vault using the `runtime.WithCaptureResponse` method and a context passed to any client method. + +```go +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +var response *http.Response +ctx := runtime.WithCaptureResponse(context.TODO(), &response) +_, err = client.GetKey(ctx, "keyName", nil) +if err != nil { + // TODO: handle error +} +// TODO: do something with response +``` + +### Additional Documentation + +For more extensive documentation on Azure Key Vault, see the [API reference documentation][reference_docs]. + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact opencode@microsoft.com with any additional questions or comments. 
+ + +[azure_identity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity +[azure_keyvault_cli]: https://docs.microsoft.com/azure/key-vault/general/quick-create-cli +[azure_keyvault_portal]: https://docs.microsoft.com/azure/key-vault/general/quick-create-portal +[azure_sub]: https://azure.microsoft.com/free/ +[default_cred_ref]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#NewDefaultAzureCredential +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[keyvault_docs]: https://docs.microsoft.com/azure/key-vault/ +[goget_azkeys]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys +[reference_docs]: https://aka.ms/azsdk/go/keyvault-keys/docs +[client_docs]: https://aka.ms/azsdk/go/keyvault-keys/docs#Client +[key_client_src]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/keyvault/azkeys/client.go +[keys_samples]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/keyvault/azkeys/example_test.go +[managed_identity]: https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fkeyvault%2Fazkeys%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/TROUBLESHOOTING.md new file mode 100644 index 00000000000..75933f67fb5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/TROUBLESHOOTING.md @@ -0,0 +1,4 @@ +# Troubleshoot Azure Key Vault Keys Client Module Issues + +See our [Azure Key Vault SDK Troubleshooting Guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/keyvault/TROUBLESHOOTING.md) +to troubleshoot issues common to Azure Key Vault client modules. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/autorest.md new file mode 100644 index 00000000000..0508436f9d3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/autorest.md @@ -0,0 +1,122 @@ +## Go + +```yaml +clear-output-folder: false +export-clients: true +go: true +input-file: https://github.com/Azure/azure-rest-api-specs/blob/551275acb80e1f8b39036b79dfc35a8f63b601a7/specification/keyvault/data-plane/Microsoft.KeyVault/stable/7.4/keys.json +license-header: MICROSOFT_MIT_NO_VERSION +module: github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys +openapi-type: "data-plane" +output-folder: ../azkeys +override-client-name: Client +security: "AADToken" +security-scopes: "https://vault.azure.net/.default" +use: "@autorest/go@4.0.0-preview.46" +version: "^3.0.0" + +directive: + # delete unused models + - remove-model: KeyExportParameters + - remove-model: KeyProperties + + # make vault URL a parameter of the client constructor + - from: swagger-document + where: $["x-ms-parameterized-host"] + transform: $.parameters[0]["x-ms-parameter-location"] = "client" + + # capitalize acronyms + - from: swagger-document + where: $.definitions.KeyImportParameters.properties.Hsm + transform: $["x-ms-client-name"] = "HSM" + - from: swagger-document + where: $.definitions..properties..iv + transform: $["x-ms-client-name"] = "IV" + - from: swagger-document + where: $.definitions..properties..kid + transform: $["x-ms-client-name"] = "KID" + + # Maxresults -> MaxResults + - from: swagger-document + where: $.paths..parameters..[?(@.name=='maxresults')] + transform: $["x-ms-client-name"] = "MaxResults" + + # keyName, keyVersion -> name, version + - from: swagger-document + where: $.paths..parameters..[?(@.name=='key-name')] + transform: $["x-ms-client-name"] = "name" + - from: swagger-document + where: $.paths..parameters..[?(@.name=='key-version')] + transform: $["x-ms-client-name"] = "version" + + # rename parameter models to match their methods + - rename-model: + from: KeyCreateParameters + to: CreateKeyParameters + - rename-model: + from: KeyExportParameters + to: ExportKeyParameters + - rename-model: + from: KeyImportParameters + to: ImportKeyParameters + - rename-model: + from: KeyReleaseParameters + to: ReleaseParameters + - rename-model: + from: KeyRestoreParameters + to: RestoreKeyParameters + - rename-model: + from: KeySignParameters + to: SignParameters + - rename-model: + from: KeyUpdateParameters + to: UpdateKeyParameters + - rename-model: + from: KeyVerifyParameters + to: VerifyParameters + + # rename paged operations from Get* to List* + - rename-operation: + from: GetDeletedKeys + to: ListDeletedKeys + - rename-operation: + from: GetKeys + to: ListKeys + - rename-operation: + from: GetKeyVersions + to: ListKeyVersions + + # delete unused error models + - from: models.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+type (?:Error|KeyVaultError).+\{(?:\s.+\s)+\}\s/g, ""); + - from: models_serde.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+func \(\w \*?(?:Error|KeyVaultError)\).*\{\s(?:.+\s)+\}\s/g, ""); + + # delete the Attributes model defined in common.json (it's used only with allOf) + - from: models.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+type Attributes.+\{(?:\s.+\s)+\}\s/, ""); + - from: models_serde.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+func \(a \*?Attributes\).*\{\s(?:.+\s)+\}\s/g, ""); + + # delete 
the version path param check (version == "" is legal for Key Vault but indescribable by OpenAPI) + - from: client.go + where: $ + transform: return $.replace(/\sif version == "" \{\s+.+version cannot be empty"\)\s+\}\s/g, ""); + + # delete client name prefix from method options and response types + - from: + - client.go + - models.go + - response_types.go + where: $ + transform: return $.replace(/Client(\w+)((?:Options|Response))/g, "$1$2"); + + # insert a handwritten type for "KID" fields so we can add parsing methods + - from: models.go + where: $ + transform: return $.replace(/(KID \*)string(\s+.*)/g, "$1ID$2") +``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/build.go new file mode 100644 index 00000000000..da8103e45c2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/build.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +//go:generate autorest ./autorest.md +//go:generate gofmt -w . + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azkeys diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/ci.yml new file mode 100644 index 00000000000..d0242798730 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/ci.yml @@ -0,0 +1,34 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/keyvault/azkeys + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/keyvault/azkeys + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + TimeoutInMinutes: 120 + ServiceDirectory: 'keyvault/azkeys' + RunLiveTests: true + AdditionalMatrixConfigs: + - Name: keyvault_test_matrix_addons + Path: sdk/keyvault/azkeys/platform-matrix.json + Selection: sparse + GenerateVMJobs: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/client.go new file mode 100644 index 00000000000..4cc063c69fd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/client.go @@ -0,0 +1,1302 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azkeys + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Client contains the methods for the Client group. +// Don't use this type directly, use a constructor function instead. +type Client struct { + internal *azcore.Client + endpoint string +} + +// BackupKey - The Key Backup operation exports a key from Azure Key Vault in a protected form. 
Note that this operation does +// NOT return key material in a form that can be used outside the Azure Key Vault system, +// the returned key material is either protected to a Azure Key Vault HSM or to Azure Key Vault itself. The intent of this +// operation is to allow a client to GENERATE a key in one Azure Key Vault +// instance, BACKUP the key, and then RESTORE it into another Azure Key Vault instance. The BACKUP operation may be used to +// export, in protected form, any key type from Azure Key Vault. Individual +// versions of a key cannot be backed up. BACKUP / RESTORE can be performed within geographical boundaries only; meaning that +// a BACKUP from one geographical area cannot be restored to another +// geographical area. For example, a backup from the US geographical area cannot be restored in an EU geographical area. This +// operation requires the key/backup permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - options - BackupKeyOptions contains the optional parameters for the Client.BackupKey method. +func (client *Client) BackupKey(ctx context.Context, name string, options *BackupKeyOptions) (BackupKeyResponse, error) { + req, err := client.backupKeyCreateRequest(ctx, name, options) + if err != nil { + return BackupKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BackupKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BackupKeyResponse{}, runtime.NewResponseError(resp) + } + return client.backupKeyHandleResponse(resp) +} + +// backupKeyCreateRequest creates the BackupKey request. +func (client *Client) backupKeyCreateRequest(ctx context.Context, name string, options *BackupKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/backup" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// backupKeyHandleResponse handles the BackupKey response. +func (client *Client) backupKeyHandleResponse(resp *http.Response) (BackupKeyResponse, error) { + result := BackupKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.BackupKeyResult); err != nil { + return BackupKeyResponse{}, err + } + return result, nil +} + +// CreateKey - The create key operation can be used to create any key type in Azure Key Vault. If the named key already exists, +// Azure Key Vault creates a new version of the key. It requires the keys/create +// permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name for the new key. The system will generate the version name for the new key. The value you provide may be +// copied globally for the purpose of running the service. The value provided should not +// include personally identifiable or sensitive information. +// - parameters - The parameters to create a key. +// - options - CreateKeyOptions contains the optional parameters for the Client.CreateKey method. 
+func (client *Client) CreateKey(ctx context.Context, name string, parameters CreateKeyParameters, options *CreateKeyOptions) (CreateKeyResponse, error) { + req, err := client.createKeyCreateRequest(ctx, name, parameters, options) + if err != nil { + return CreateKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return CreateKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CreateKeyResponse{}, runtime.NewResponseError(resp) + } + return client.createKeyHandleResponse(resp) +} + +// createKeyCreateRequest creates the CreateKey request. +func (client *Client) createKeyCreateRequest(ctx context.Context, name string, parameters CreateKeyParameters, options *CreateKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/create" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// createKeyHandleResponse handles the CreateKey response. +func (client *Client) createKeyHandleResponse(resp *http.Response) (CreateKeyResponse, error) { + result := CreateKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return CreateKeyResponse{}, err + } + return result, nil +} + +// Decrypt - The DECRYPT operation decrypts a well-formed block of ciphertext using the target encryption key and specified +// algorithm. This operation is the reverse of the ENCRYPT operation; only a single block of +// data may be decrypted, the size of this block is dependent on the target key and the algorithm to be used. The DECRYPT +// operation applies to asymmetric and symmetric keys stored in Azure Key Vault +// since it uses the private portion of the key. This operation requires the keys/decrypt permission. Microsoft recommends +// not to use CBC algorithms for decryption without first ensuring the integrity of +// the ciphertext using an HMAC, for example. See https://docs.microsoft.com/dotnet/standard/security/vulnerabilities-cbc-mode +// for more information. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for the decryption operation. +// - options - DecryptOptions contains the optional parameters for the Client.Decrypt method. +func (client *Client) Decrypt(ctx context.Context, name string, version string, parameters KeyOperationsParameters, options *DecryptOptions) (DecryptResponse, error) { + req, err := client.decryptCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return DecryptResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DecryptResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DecryptResponse{}, runtime.NewResponseError(resp) + } + return client.decryptHandleResponse(resp) +} + +// decryptCreateRequest creates the Decrypt request. 
+func (client *Client) decryptCreateRequest(ctx context.Context, name string, version string, parameters KeyOperationsParameters, options *DecryptOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/decrypt" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// decryptHandleResponse handles the Decrypt response. +func (client *Client) decryptHandleResponse(resp *http.Response) (DecryptResponse, error) { + result := DecryptResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return DecryptResponse{}, err + } + return result, nil +} + +// DeleteKey - The delete key operation cannot be used to remove individual versions of a key. This operation removes the +// cryptographic material associated with the key, which means the key is not usable for +// Sign/Verify, Wrap/Unwrap or Encrypt/Decrypt operations. This operation requires the keys/delete permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key to delete. +// - options - DeleteKeyOptions contains the optional parameters for the Client.DeleteKey method. +func (client *Client) DeleteKey(ctx context.Context, name string, options *DeleteKeyOptions) (DeleteKeyResponse, error) { + req, err := client.deleteKeyCreateRequest(ctx, name, options) + if err != nil { + return DeleteKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DeleteKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DeleteKeyResponse{}, runtime.NewResponseError(resp) + } + return client.deleteKeyHandleResponse(resp) +} + +// deleteKeyCreateRequest creates the DeleteKey request. +func (client *Client) deleteKeyCreateRequest(ctx context.Context, name string, options *DeleteKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// deleteKeyHandleResponse handles the DeleteKey response. +func (client *Client) deleteKeyHandleResponse(resp *http.Response) (DeleteKeyResponse, error) { + result := DeleteKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedKeyBundle); err != nil { + return DeleteKeyResponse{}, err + } + return result, nil +} + +// Encrypt - The ENCRYPT operation encrypts an arbitrary sequence of bytes using an encryption key that is stored in Azure +// Key Vault. 
Note that the ENCRYPT operation only supports a single block of data, the size +// of which is dependent on the target key and the encryption algorithm to be used. The ENCRYPT operation is only strictly +// necessary for symmetric keys stored in Azure Key Vault since protection with an +// asymmetric key can be performed using public portion of the key. This operation is supported for asymmetric keys as a convenience +// for callers that have a key-reference but do not have access to the +// public key material. This operation requires the keys/encrypt permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for the encryption operation. +// - options - EncryptOptions contains the optional parameters for the Client.Encrypt method. +func (client *Client) Encrypt(ctx context.Context, name string, version string, parameters KeyOperationsParameters, options *EncryptOptions) (EncryptResponse, error) { + req, err := client.encryptCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return EncryptResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return EncryptResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return EncryptResponse{}, runtime.NewResponseError(resp) + } + return client.encryptHandleResponse(resp) +} + +// encryptCreateRequest creates the Encrypt request. +func (client *Client) encryptCreateRequest(ctx context.Context, name string, version string, parameters KeyOperationsParameters, options *EncryptOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/encrypt" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// encryptHandleResponse handles the Encrypt response. +func (client *Client) encryptHandleResponse(resp *http.Response) (EncryptResponse, error) { + result := EncryptResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return EncryptResponse{}, err + } + return result, nil +} + +// GetDeletedKey - The Get Deleted Key operation is applicable for soft-delete enabled vaults. While the operation can be +// invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. This +// operation requires the keys/get permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - options - GetDeletedKeyOptions contains the optional parameters for the Client.GetDeletedKey method. 
+func (client *Client) GetDeletedKey(ctx context.Context, name string, options *GetDeletedKeyOptions) (GetDeletedKeyResponse, error) { + req, err := client.getDeletedKeyCreateRequest(ctx, name, options) + if err != nil { + return GetDeletedKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetDeletedKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetDeletedKeyResponse{}, runtime.NewResponseError(resp) + } + return client.getDeletedKeyHandleResponse(resp) +} + +// getDeletedKeyCreateRequest creates the GetDeletedKey request. +func (client *Client) getDeletedKeyCreateRequest(ctx context.Context, name string, options *GetDeletedKeyOptions) (*policy.Request, error) { + urlPath := "/deletedkeys/{key-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getDeletedKeyHandleResponse handles the GetDeletedKey response. +func (client *Client) getDeletedKeyHandleResponse(resp *http.Response) (GetDeletedKeyResponse, error) { + result := GetDeletedKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedKeyBundle); err != nil { + return GetDeletedKeyResponse{}, err + } + return result, nil +} + +// GetKey - The get key operation is applicable to all key types. If the requested key is symmetric, then no key material +// is released in the response. This operation requires the keys/get permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key to get. +// - version - Adding the version parameter retrieves a specific version of a key. This URI fragment is optional. If not specified, +// the latest version of the key is returned. +// - options - GetKeyOptions contains the optional parameters for the Client.GetKey method. +func (client *Client) GetKey(ctx context.Context, name string, version string, options *GetKeyOptions) (GetKeyResponse, error) { + req, err := client.getKeyCreateRequest(ctx, name, version, options) + if err != nil { + return GetKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetKeyResponse{}, runtime.NewResponseError(resp) + } + return client.getKeyHandleResponse(resp) +} + +// getKeyCreateRequest creates the GetKey request. 
+func (client *Client) getKeyCreateRequest(ctx context.Context, name string, version string, options *GetKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getKeyHandleResponse handles the GetKey response. +func (client *Client) getKeyHandleResponse(resp *http.Response) (GetKeyResponse, error) { + result := GetKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return GetKeyResponse{}, err + } + return result, nil +} + +// GetKeyRotationPolicy - The GetKeyRotationPolicy operation returns the specified key policy resources in the specified key +// vault. This operation requires the keys/get permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key in a given key vault. +// - options - GetKeyRotationPolicyOptions contains the optional parameters for the Client.GetKeyRotationPolicy method. +func (client *Client) GetKeyRotationPolicy(ctx context.Context, name string, options *GetKeyRotationPolicyOptions) (GetKeyRotationPolicyResponse, error) { + req, err := client.getKeyRotationPolicyCreateRequest(ctx, name, options) + if err != nil { + return GetKeyRotationPolicyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetKeyRotationPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetKeyRotationPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.getKeyRotationPolicyHandleResponse(resp) +} + +// getKeyRotationPolicyCreateRequest creates the GetKeyRotationPolicy request. +func (client *Client) getKeyRotationPolicyCreateRequest(ctx context.Context, name string, options *GetKeyRotationPolicyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/rotationpolicy" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getKeyRotationPolicyHandleResponse handles the GetKeyRotationPolicy response. +func (client *Client) getKeyRotationPolicyHandleResponse(resp *http.Response) (GetKeyRotationPolicyResponse, error) { + result := GetKeyRotationPolicyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyRotationPolicy); err != nil { + return GetKeyRotationPolicyResponse{}, err + } + return result, nil +} + +// GetRandomBytes - Get the requested number of bytes containing random values from a managed HSM. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 7.4 +// - parameters - The request object to get random bytes. +// - options - GetRandomBytesOptions contains the optional parameters for the Client.GetRandomBytes method. +func (client *Client) GetRandomBytes(ctx context.Context, parameters GetRandomBytesRequest, options *GetRandomBytesOptions) (GetRandomBytesResponse, error) { + req, err := client.getRandomBytesCreateRequest(ctx, parameters, options) + if err != nil { + return GetRandomBytesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetRandomBytesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetRandomBytesResponse{}, runtime.NewResponseError(resp) + } + return client.getRandomBytesHandleResponse(resp) +} + +// getRandomBytesCreateRequest creates the GetRandomBytes request. +func (client *Client) getRandomBytesCreateRequest(ctx context.Context, parameters GetRandomBytesRequest, options *GetRandomBytesOptions) (*policy.Request, error) { + urlPath := "/rng" + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// getRandomBytesHandleResponse handles the GetRandomBytes response. +func (client *Client) getRandomBytesHandleResponse(resp *http.Response) (GetRandomBytesResponse, error) { + result := GetRandomBytesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RandomBytes); err != nil { + return GetRandomBytesResponse{}, err + } + return result, nil +} + +// ImportKey - The import key operation may be used to import any key type into an Azure Key Vault. If the named key already +// exists, Azure Key Vault creates a new version of the key. This operation requires the +// keys/import permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - Name for the imported key. The value you provide may be copied globally for the purpose of running the service. +// The value provided should not include personally identifiable or sensitive information. +// - parameters - The parameters to import a key. +// - options - ImportKeyOptions contains the optional parameters for the Client.ImportKey method. +func (client *Client) ImportKey(ctx context.Context, name string, parameters ImportKeyParameters, options *ImportKeyOptions) (ImportKeyResponse, error) { + req, err := client.importKeyCreateRequest(ctx, name, parameters, options) + if err != nil { + return ImportKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ImportKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ImportKeyResponse{}, runtime.NewResponseError(resp) + } + return client.importKeyHandleResponse(resp) +} + +// importKeyCreateRequest creates the ImportKey request. 
+func (client *Client) importKeyCreateRequest(ctx context.Context, name string, parameters ImportKeyParameters, options *ImportKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// importKeyHandleResponse handles the ImportKey response. +func (client *Client) importKeyHandleResponse(resp *http.Response) (ImportKeyResponse, error) { + result := ImportKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return ImportKeyResponse{}, err + } + return result, nil +} + +// NewListDeletedKeysPager - Retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public +// part of a deleted key. This operation includes deletion-specific information. The Get Deleted Keys +// operation is applicable for vaults enabled for soft-delete. While the operation can be invoked on any vault, it will return +// an error if invoked on a non soft-delete enabled vault. This operation +// requires the keys/list permission. +// +// Generated from API version 7.4 +// - options - ListDeletedKeysOptions contains the optional parameters for the Client.NewListDeletedKeysPager method. +func (client *Client) NewListDeletedKeysPager(options *ListDeletedKeysOptions) *runtime.Pager[ListDeletedKeysResponse] { + return runtime.NewPager(runtime.PagingHandler[ListDeletedKeysResponse]{ + More: func(page ListDeletedKeysResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListDeletedKeysResponse) (ListDeletedKeysResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listDeletedKeysCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ListDeletedKeysResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ListDeletedKeysResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListDeletedKeysResponse{}, runtime.NewResponseError(resp) + } + return client.listDeletedKeysHandleResponse(resp) + }, + }) +} + +// listDeletedKeysCreateRequest creates the ListDeletedKeys request. +func (client *Client) listDeletedKeysCreateRequest(ctx context.Context, options *ListDeletedKeysOptions) (*policy.Request, error) { + urlPath := "/deletedkeys" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.MaxResults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.MaxResults), 10)) + } + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listDeletedKeysHandleResponse handles the ListDeletedKeys response. 
+func (client *Client) listDeletedKeysHandleResponse(resp *http.Response) (ListDeletedKeysResponse, error) { + result := ListDeletedKeysResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedKeyListResult); err != nil { + return ListDeletedKeysResponse{}, err + } + return result, nil +} + +// NewListKeyVersionsPager - The full key identifier, attributes, and tags are provided in the response. This operation requires +// the keys/list permission. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - options - ListKeyVersionsOptions contains the optional parameters for the Client.NewListKeyVersionsPager method. +func (client *Client) NewListKeyVersionsPager(name string, options *ListKeyVersionsOptions) *runtime.Pager[ListKeyVersionsResponse] { + return runtime.NewPager(runtime.PagingHandler[ListKeyVersionsResponse]{ + More: func(page ListKeyVersionsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListKeyVersionsResponse) (ListKeyVersionsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listKeyVersionsCreateRequest(ctx, name, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ListKeyVersionsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ListKeyVersionsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListKeyVersionsResponse{}, runtime.NewResponseError(resp) + } + return client.listKeyVersionsHandleResponse(resp) + }, + }) +} + +// listKeyVersionsCreateRequest creates the ListKeyVersions request. +func (client *Client) listKeyVersionsCreateRequest(ctx context.Context, name string, options *ListKeyVersionsOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/versions" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.MaxResults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.MaxResults), 10)) + } + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listKeyVersionsHandleResponse handles the ListKeyVersions response. +func (client *Client) listKeyVersionsHandleResponse(resp *http.Response) (ListKeyVersionsResponse, error) { + result := ListKeyVersionsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyListResult); err != nil { + return ListKeyVersionsResponse{}, err + } + return result, nil +} + +// NewListKeysPager - Retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public part +// of a stored key. The LIST operation is applicable to all key types, however only the base key +// identifier, attributes, and tags are provided in the response. Individual versions of a key are not listed in the response. +// This operation requires the keys/list permission. +// +// Generated from API version 7.4 +// - options - ListKeysOptions contains the optional parameters for the Client.NewListKeysPager method. 
+func (client *Client) NewListKeysPager(options *ListKeysOptions) *runtime.Pager[ListKeysResponse] { + return runtime.NewPager(runtime.PagingHandler[ListKeysResponse]{ + More: func(page ListKeysResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListKeysResponse) (ListKeysResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listKeysCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ListKeysResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ListKeysResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListKeysResponse{}, runtime.NewResponseError(resp) + } + return client.listKeysHandleResponse(resp) + }, + }) +} + +// listKeysCreateRequest creates the ListKeys request. +func (client *Client) listKeysCreateRequest(ctx context.Context, options *ListKeysOptions) (*policy.Request, error) { + urlPath := "/keys" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.MaxResults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.MaxResults), 10)) + } + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listKeysHandleResponse handles the ListKeys response. +func (client *Client) listKeysHandleResponse(resp *http.Response) (ListKeysResponse, error) { + result := ListKeysResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyListResult); err != nil { + return ListKeysResponse{}, err + } + return result, nil +} + +// PurgeDeletedKey - The Purge Deleted Key operation is applicable for soft-delete enabled vaults. While the operation can +// be invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. +// This operation requires the keys/purge permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key +// - options - PurgeDeletedKeyOptions contains the optional parameters for the Client.PurgeDeletedKey method. +func (client *Client) PurgeDeletedKey(ctx context.Context, name string, options *PurgeDeletedKeyOptions) (PurgeDeletedKeyResponse, error) { + req, err := client.purgeDeletedKeyCreateRequest(ctx, name, options) + if err != nil { + return PurgeDeletedKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PurgeDeletedKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return PurgeDeletedKeyResponse{}, runtime.NewResponseError(resp) + } + return PurgeDeletedKeyResponse{}, nil +} + +// purgeDeletedKeyCreateRequest creates the PurgeDeletedKey request. 
+func (client *Client) purgeDeletedKeyCreateRequest(ctx context.Context, name string, options *PurgeDeletedKeyOptions) (*policy.Request, error) { + urlPath := "/deletedkeys/{key-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// RecoverDeletedKey - The Recover Deleted Key operation is applicable for deleted keys in soft-delete enabled vaults. It +// recovers the deleted key back to its latest version under /keys. An attempt to recover an non-deleted +// key will return an error. Consider this the inverse of the delete operation on soft-delete enabled vaults. This operation +// requires the keys/recover permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the deleted key. +// - options - RecoverDeletedKeyOptions contains the optional parameters for the Client.RecoverDeletedKey method. +func (client *Client) RecoverDeletedKey(ctx context.Context, name string, options *RecoverDeletedKeyOptions) (RecoverDeletedKeyResponse, error) { + req, err := client.recoverDeletedKeyCreateRequest(ctx, name, options) + if err != nil { + return RecoverDeletedKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RecoverDeletedKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RecoverDeletedKeyResponse{}, runtime.NewResponseError(resp) + } + return client.recoverDeletedKeyHandleResponse(resp) +} + +// recoverDeletedKeyCreateRequest creates the RecoverDeletedKey request. +func (client *Client) recoverDeletedKeyCreateRequest(ctx context.Context, name string, options *RecoverDeletedKeyOptions) (*policy.Request, error) { + urlPath := "/deletedkeys/{key-name}/recover" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// recoverDeletedKeyHandleResponse handles the RecoverDeletedKey response. +func (client *Client) recoverDeletedKeyHandleResponse(resp *http.Response) (RecoverDeletedKeyResponse, error) { + result := RecoverDeletedKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return RecoverDeletedKeyResponse{}, err + } + return result, nil +} + +// Release - The release key operation is applicable to all key types. The target key must be marked exportable. This operation +// requires the keys/release permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key to get. +// - version - Adding the version parameter retrieves a specific version of a key. +// - parameters - The parameters for the key release operation. 
+// - options - ReleaseOptions contains the optional parameters for the Client.Release method. +func (client *Client) Release(ctx context.Context, name string, version string, parameters ReleaseParameters, options *ReleaseOptions) (ReleaseResponse, error) { + req, err := client.releaseCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return ReleaseResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ReleaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ReleaseResponse{}, runtime.NewResponseError(resp) + } + return client.releaseHandleResponse(resp) +} + +// releaseCreateRequest creates the Release request. +func (client *Client) releaseCreateRequest(ctx context.Context, name string, version string, parameters ReleaseParameters, options *ReleaseOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/release" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// releaseHandleResponse handles the Release response. +func (client *Client) releaseHandleResponse(resp *http.Response) (ReleaseResponse, error) { + result := ReleaseResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyReleaseResult); err != nil { + return ReleaseResponse{}, err + } + return result, nil +} + +// RestoreKey - Imports a previously backed up key into Azure Key Vault, restoring the key, its key identifier, attributes +// and access control policies. The RESTORE operation may be used to import a previously backed +// up key. Individual versions of a key cannot be restored. The key is restored in its entirety with the same key name as +// it had when it was backed up. If the key name is not available in the target Key +// Vault, the RESTORE operation will be rejected. While the key name is retained during restore, the final key identifier +// will change if the key is restored to a different vault. Restore will restore all +// versions and preserve version identifiers. The RESTORE operation is subject to security constraints: The target Key Vault +// must be owned by the same Microsoft Azure Subscription as the source Key Vault +// The user must have RESTORE permission in the target Key Vault. This operation requires the keys/restore permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - parameters - The parameters to restore the key. +// - options - RestoreKeyOptions contains the optional parameters for the Client.RestoreKey method. 
+func (client *Client) RestoreKey(ctx context.Context, parameters RestoreKeyParameters, options *RestoreKeyOptions) (RestoreKeyResponse, error) { + req, err := client.restoreKeyCreateRequest(ctx, parameters, options) + if err != nil { + return RestoreKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RestoreKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RestoreKeyResponse{}, runtime.NewResponseError(resp) + } + return client.restoreKeyHandleResponse(resp) +} + +// restoreKeyCreateRequest creates the RestoreKey request. +func (client *Client) restoreKeyCreateRequest(ctx context.Context, parameters RestoreKeyParameters, options *RestoreKeyOptions) (*policy.Request, error) { + urlPath := "/keys/restore" + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// restoreKeyHandleResponse handles the RestoreKey response. +func (client *Client) restoreKeyHandleResponse(resp *http.Response) (RestoreKeyResponse, error) { + result := RestoreKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return RestoreKeyResponse{}, err + } + return result, nil +} + +// RotateKey - The operation will rotate the key based on the key policy. It requires the keys/rotate permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of key to be rotated. The system will generate a new version in the specified key. +// - options - RotateKeyOptions contains the optional parameters for the Client.RotateKey method. +func (client *Client) RotateKey(ctx context.Context, name string, options *RotateKeyOptions) (RotateKeyResponse, error) { + req, err := client.rotateKeyCreateRequest(ctx, name, options) + if err != nil { + return RotateKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RotateKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RotateKeyResponse{}, runtime.NewResponseError(resp) + } + return client.rotateKeyHandleResponse(resp) +} + +// rotateKeyCreateRequest creates the RotateKey request. +func (client *Client) rotateKeyCreateRequest(ctx context.Context, name string, options *RotateKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/rotate" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// rotateKeyHandleResponse handles the RotateKey response. 
+func (client *Client) rotateKeyHandleResponse(resp *http.Response) (RotateKeyResponse, error) { + result := RotateKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return RotateKeyResponse{}, err + } + return result, nil +} + +// Sign - The SIGN operation is applicable to asymmetric and symmetric keys stored in Azure Key Vault since this operation +// uses the private portion of the key. This operation requires the keys/sign permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for the signing operation. +// - options - SignOptions contains the optional parameters for the Client.Sign method. +func (client *Client) Sign(ctx context.Context, name string, version string, parameters SignParameters, options *SignOptions) (SignResponse, error) { + req, err := client.signCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return SignResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return SignResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SignResponse{}, runtime.NewResponseError(resp) + } + return client.signHandleResponse(resp) +} + +// signCreateRequest creates the Sign request. +func (client *Client) signCreateRequest(ctx context.Context, name string, version string, parameters SignParameters, options *SignOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/sign" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// signHandleResponse handles the Sign response. +func (client *Client) signHandleResponse(resp *http.Response) (SignResponse, error) { + result := SignResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return SignResponse{}, err + } + return result, nil +} + +// UnwrapKey - The UNWRAP operation supports decryption of a symmetric key using the target key encryption key. This operation +// is the reverse of the WRAP operation. The UNWRAP operation applies to asymmetric and +// symmetric keys stored in Azure Key Vault since it uses the private portion of the key. This operation requires the keys/unwrapKey +// permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for the key operation. +// - options - UnwrapKeyOptions contains the optional parameters for the Client.UnwrapKey method. 
+func (client *Client) UnwrapKey(ctx context.Context, name string, version string, parameters KeyOperationsParameters, options *UnwrapKeyOptions) (UnwrapKeyResponse, error) { + req, err := client.unwrapKeyCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return UnwrapKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return UnwrapKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return UnwrapKeyResponse{}, runtime.NewResponseError(resp) + } + return client.unwrapKeyHandleResponse(resp) +} + +// unwrapKeyCreateRequest creates the UnwrapKey request. +func (client *Client) unwrapKeyCreateRequest(ctx context.Context, name string, version string, parameters KeyOperationsParameters, options *UnwrapKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/unwrapkey" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// unwrapKeyHandleResponse handles the UnwrapKey response. +func (client *Client) unwrapKeyHandleResponse(resp *http.Response) (UnwrapKeyResponse, error) { + result := UnwrapKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return UnwrapKeyResponse{}, err + } + return result, nil +} + +// UpdateKey - In order to perform this operation, the key must already exist in the Key Vault. Note: The cryptographic material +// of a key itself cannot be changed. This operation requires the keys/update permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of key to update. +// - version - The version of the key to update. +// - parameters - The parameters of the key to update. +// - options - UpdateKeyOptions contains the optional parameters for the Client.UpdateKey method. +func (client *Client) UpdateKey(ctx context.Context, name string, version string, parameters UpdateKeyParameters, options *UpdateKeyOptions) (UpdateKeyResponse, error) { + req, err := client.updateKeyCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return UpdateKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return UpdateKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return UpdateKeyResponse{}, runtime.NewResponseError(resp) + } + return client.updateKeyHandleResponse(resp) +} + +// updateKeyCreateRequest creates the UpdateKey request. 
+func (client *Client) updateKeyCreateRequest(ctx context.Context, name string, version string, parameters UpdateKeyParameters, options *UpdateKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// updateKeyHandleResponse handles the UpdateKey response. +func (client *Client) updateKeyHandleResponse(resp *http.Response) (UpdateKeyResponse, error) { + result := UpdateKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return UpdateKeyResponse{}, err + } + return result, nil +} + +// UpdateKeyRotationPolicy - Set specified members in the key policy. Leave others as undefined. This operation requires the +// keys/update permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key in the given vault. +// - keyRotationPolicy - The policy for the key. +// - options - UpdateKeyRotationPolicyOptions contains the optional parameters for the Client.UpdateKeyRotationPolicy +// method. +func (client *Client) UpdateKeyRotationPolicy(ctx context.Context, name string, keyRotationPolicy KeyRotationPolicy, options *UpdateKeyRotationPolicyOptions) (UpdateKeyRotationPolicyResponse, error) { + req, err := client.updateKeyRotationPolicyCreateRequest(ctx, name, keyRotationPolicy, options) + if err != nil { + return UpdateKeyRotationPolicyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return UpdateKeyRotationPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return UpdateKeyRotationPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.updateKeyRotationPolicyHandleResponse(resp) +} + +// updateKeyRotationPolicyCreateRequest creates the UpdateKeyRotationPolicy request. +func (client *Client) updateKeyRotationPolicyCreateRequest(ctx context.Context, name string, keyRotationPolicy KeyRotationPolicy, options *UpdateKeyRotationPolicyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/rotationpolicy" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, keyRotationPolicy) +} + +// updateKeyRotationPolicyHandleResponse handles the UpdateKeyRotationPolicy response. 
+func (client *Client) updateKeyRotationPolicyHandleResponse(resp *http.Response) (UpdateKeyRotationPolicyResponse, error) { + result := UpdateKeyRotationPolicyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyRotationPolicy); err != nil { + return UpdateKeyRotationPolicyResponse{}, err + } + return result, nil +} + +// Verify - The VERIFY operation is applicable to symmetric keys stored in Azure Key Vault. VERIFY is not strictly necessary +// for asymmetric keys stored in Azure Key Vault since signature verification can be +// performed using the public portion of the key but this operation is supported as a convenience for callers that only have +// a key-reference and not the public portion of the key. This operation requires +// the keys/verify permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for verify operations. +// - options - VerifyOptions contains the optional parameters for the Client.Verify method. +func (client *Client) Verify(ctx context.Context, name string, version string, parameters VerifyParameters, options *VerifyOptions) (VerifyResponse, error) { + req, err := client.verifyCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return VerifyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return VerifyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VerifyResponse{}, runtime.NewResponseError(resp) + } + return client.verifyHandleResponse(resp) +} + +// verifyCreateRequest creates the Verify request. +func (client *Client) verifyCreateRequest(ctx context.Context, name string, version string, parameters VerifyParameters, options *VerifyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/verify" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// verifyHandleResponse handles the Verify response. +func (client *Client) verifyHandleResponse(resp *http.Response) (VerifyResponse, error) { + result := VerifyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyVerifyResult); err != nil { + return VerifyResponse{}, err + } + return result, nil +} + +// WrapKey - The WRAP operation supports encryption of a symmetric key using a key encryption key that has previously been +// stored in an Azure Key Vault. The WRAP operation is only strictly necessary for symmetric +// keys stored in Azure Key Vault since protection with an asymmetric key can be performed using the public portion of the +// key. This operation is supported for asymmetric keys as a convenience for +// callers that have a key-reference but do not have access to the public key material. This operation requires the keys/wrapKey +// permission. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 7.4 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for wrap operation. +// - options - WrapKeyOptions contains the optional parameters for the Client.WrapKey method. +func (client *Client) WrapKey(ctx context.Context, name string, version string, parameters KeyOperationsParameters, options *WrapKeyOptions) (WrapKeyResponse, error) { + req, err := client.wrapKeyCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return WrapKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return WrapKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return WrapKeyResponse{}, runtime.NewResponseError(resp) + } + return client.wrapKeyHandleResponse(resp) +} + +// wrapKeyCreateRequest creates the WrapKey request. +func (client *Client) wrapKeyCreateRequest(ctx context.Context, name string, version string, parameters KeyOperationsParameters, options *WrapKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/wrapkey" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// wrapKeyHandleResponse handles the WrapKey response. +func (client *Client) wrapKeyHandleResponse(resp *http.Response) (WrapKeyResponse, error) { + result := WrapKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return WrapKeyResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/constants.go new file mode 100644 index 00000000000..464b9a67ceb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/constants.go @@ -0,0 +1,270 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azkeys + +// DeletionRecoveryLevel - Reflects the deletion recovery level currently in effect for keys in the current vault. If it contains +// 'Purgeable' the key can be permanently deleted by a privileged user; otherwise, only the system +// can purge the key, at the end of the retention interval. +type DeletionRecoveryLevel string + +const ( + // DeletionRecoveryLevelCustomizedRecoverable - Denotes a vault state in which deletion is recoverable without the possibility + // for immediate and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90).This level guarantees the recoverability + // of the deleted entity during the retention interval and while the subscription is still available. 
+	DeletionRecoveryLevelCustomizedRecoverable DeletionRecoveryLevel = "CustomizedRecoverable"
+	// DeletionRecoveryLevelCustomizedRecoverableProtectedSubscription - Denotes a vault and subscription state in which deletion
+	// is recoverable, immediate and permanent deletion (i.e. purge) is not permitted, and in which the subscription itself cannot
+	// be permanently canceled when 7<= SoftDeleteRetentionInDays < 90. This level guarantees the recoverability of the deleted
+	// entity during the retention interval, and also reflects the fact that the subscription itself cannot be cancelled.
+	DeletionRecoveryLevelCustomizedRecoverableProtectedSubscription DeletionRecoveryLevel = "CustomizedRecoverable+ProtectedSubscription"
+	// DeletionRecoveryLevelCustomizedRecoverablePurgeable - Denotes a vault state in which deletion is recoverable, and which
+	// also permits immediate and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90). This level guarantees
+	// the recoverability of the deleted entity during the retention interval, unless a Purge operation is requested, or the subscription
+	// is cancelled.
+	DeletionRecoveryLevelCustomizedRecoverablePurgeable DeletionRecoveryLevel = "CustomizedRecoverable+Purgeable"
+	// DeletionRecoveryLevelPurgeable - Denotes a vault state in which deletion is an irreversible operation, without the possibility
+	// for recovery. This level corresponds to no protection being available against a Delete operation; the data is irretrievably
+	// lost upon accepting a Delete operation at the entity level or higher (vault, resource group, subscription etc.)
+	DeletionRecoveryLevelPurgeable DeletionRecoveryLevel = "Purgeable"
+	// DeletionRecoveryLevelRecoverable - Denotes a vault state in which deletion is recoverable without the possibility for immediate
+	// and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted entity during the retention
+	// interval(90 days) and while the subscription is still available. System will permanently delete it after 90 days, if not
+	// recovered
+	DeletionRecoveryLevelRecoverable DeletionRecoveryLevel = "Recoverable"
+	// DeletionRecoveryLevelRecoverableProtectedSubscription - Denotes a vault and subscription state in which deletion is recoverable
+	// within retention interval (90 days), immediate and permanent deletion (i.e. purge) is not permitted, and in which the subscription
+	// itself cannot be permanently canceled. System will permanently delete it after 90 days, if not recovered
+	DeletionRecoveryLevelRecoverableProtectedSubscription DeletionRecoveryLevel = "Recoverable+ProtectedSubscription"
+	// DeletionRecoveryLevelRecoverablePurgeable - Denotes a vault state in which deletion is recoverable, and which also permits
+	// immediate and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted entity during the
+	// retention interval (90 days), unless a Purge operation is requested, or the subscription is cancelled. System will permanently
+	// delete it after 90 days, if not recovered
+	DeletionRecoveryLevelRecoverablePurgeable DeletionRecoveryLevel = "Recoverable+Purgeable"
+)
+
+// PossibleDeletionRecoveryLevelValues returns the possible values for the DeletionRecoveryLevel const type.
+func PossibleDeletionRecoveryLevelValues() []DeletionRecoveryLevel { + return []DeletionRecoveryLevel{ + DeletionRecoveryLevelCustomizedRecoverable, + DeletionRecoveryLevelCustomizedRecoverableProtectedSubscription, + DeletionRecoveryLevelCustomizedRecoverablePurgeable, + DeletionRecoveryLevelPurgeable, + DeletionRecoveryLevelRecoverable, + DeletionRecoveryLevelRecoverableProtectedSubscription, + DeletionRecoveryLevelRecoverablePurgeable, + } +} + +// JSONWebKeyCurveName - Elliptic curve name. For valid values, see JsonWebKeyCurveName. +type JSONWebKeyCurveName string + +const ( + // JSONWebKeyCurveNameP256 - The NIST P-256 elliptic curve, AKA SECG curve SECP256R1. + JSONWebKeyCurveNameP256 JSONWebKeyCurveName = "P-256" + // JSONWebKeyCurveNameP256K - The SECG SECP256K1 elliptic curve. + JSONWebKeyCurveNameP256K JSONWebKeyCurveName = "P-256K" + // JSONWebKeyCurveNameP384 - The NIST P-384 elliptic curve, AKA SECG curve SECP384R1. + JSONWebKeyCurveNameP384 JSONWebKeyCurveName = "P-384" + // JSONWebKeyCurveNameP521 - The NIST P-521 elliptic curve, AKA SECG curve SECP521R1. + JSONWebKeyCurveNameP521 JSONWebKeyCurveName = "P-521" +) + +// PossibleJSONWebKeyCurveNameValues returns the possible values for the JSONWebKeyCurveName const type. +func PossibleJSONWebKeyCurveNameValues() []JSONWebKeyCurveName { + return []JSONWebKeyCurveName{ + JSONWebKeyCurveNameP256, + JSONWebKeyCurveNameP256K, + JSONWebKeyCurveNameP384, + JSONWebKeyCurveNameP521, + } +} + +// JSONWebKeyEncryptionAlgorithm - algorithm identifier +type JSONWebKeyEncryptionAlgorithm string + +const ( + JSONWebKeyEncryptionAlgorithmA128CBC JSONWebKeyEncryptionAlgorithm = "A128CBC" + JSONWebKeyEncryptionAlgorithmA128CBCPAD JSONWebKeyEncryptionAlgorithm = "A128CBCPAD" + JSONWebKeyEncryptionAlgorithmA128GCM JSONWebKeyEncryptionAlgorithm = "A128GCM" + JSONWebKeyEncryptionAlgorithmA128KW JSONWebKeyEncryptionAlgorithm = "A128KW" + JSONWebKeyEncryptionAlgorithmA192CBC JSONWebKeyEncryptionAlgorithm = "A192CBC" + JSONWebKeyEncryptionAlgorithmA192CBCPAD JSONWebKeyEncryptionAlgorithm = "A192CBCPAD" + JSONWebKeyEncryptionAlgorithmA192GCM JSONWebKeyEncryptionAlgorithm = "A192GCM" + JSONWebKeyEncryptionAlgorithmA192KW JSONWebKeyEncryptionAlgorithm = "A192KW" + JSONWebKeyEncryptionAlgorithmA256CBC JSONWebKeyEncryptionAlgorithm = "A256CBC" + JSONWebKeyEncryptionAlgorithmA256CBCPAD JSONWebKeyEncryptionAlgorithm = "A256CBCPAD" + JSONWebKeyEncryptionAlgorithmA256GCM JSONWebKeyEncryptionAlgorithm = "A256GCM" + JSONWebKeyEncryptionAlgorithmA256KW JSONWebKeyEncryptionAlgorithm = "A256KW" + JSONWebKeyEncryptionAlgorithmRSA15 JSONWebKeyEncryptionAlgorithm = "RSA1_5" + JSONWebKeyEncryptionAlgorithmRSAOAEP JSONWebKeyEncryptionAlgorithm = "RSA-OAEP" + JSONWebKeyEncryptionAlgorithmRSAOAEP256 JSONWebKeyEncryptionAlgorithm = "RSA-OAEP-256" +) + +// PossibleJSONWebKeyEncryptionAlgorithmValues returns the possible values for the JSONWebKeyEncryptionAlgorithm const type. 
+func PossibleJSONWebKeyEncryptionAlgorithmValues() []JSONWebKeyEncryptionAlgorithm { + return []JSONWebKeyEncryptionAlgorithm{ + JSONWebKeyEncryptionAlgorithmA128CBC, + JSONWebKeyEncryptionAlgorithmA128CBCPAD, + JSONWebKeyEncryptionAlgorithmA128GCM, + JSONWebKeyEncryptionAlgorithmA128KW, + JSONWebKeyEncryptionAlgorithmA192CBC, + JSONWebKeyEncryptionAlgorithmA192CBCPAD, + JSONWebKeyEncryptionAlgorithmA192GCM, + JSONWebKeyEncryptionAlgorithmA192KW, + JSONWebKeyEncryptionAlgorithmA256CBC, + JSONWebKeyEncryptionAlgorithmA256CBCPAD, + JSONWebKeyEncryptionAlgorithmA256GCM, + JSONWebKeyEncryptionAlgorithmA256KW, + JSONWebKeyEncryptionAlgorithmRSA15, + JSONWebKeyEncryptionAlgorithmRSAOAEP, + JSONWebKeyEncryptionAlgorithmRSAOAEP256, + } +} + +// JSONWebKeyOperation - JSON web key operations. For more information, see JsonWebKeyOperation. +type JSONWebKeyOperation string + +const ( + JSONWebKeyOperationDecrypt JSONWebKeyOperation = "decrypt" + JSONWebKeyOperationEncrypt JSONWebKeyOperation = "encrypt" + JSONWebKeyOperationExport JSONWebKeyOperation = "export" + JSONWebKeyOperationImport JSONWebKeyOperation = "import" + JSONWebKeyOperationSign JSONWebKeyOperation = "sign" + JSONWebKeyOperationUnwrapKey JSONWebKeyOperation = "unwrapKey" + JSONWebKeyOperationVerify JSONWebKeyOperation = "verify" + JSONWebKeyOperationWrapKey JSONWebKeyOperation = "wrapKey" +) + +// PossibleJSONWebKeyOperationValues returns the possible values for the JSONWebKeyOperation const type. +func PossibleJSONWebKeyOperationValues() []JSONWebKeyOperation { + return []JSONWebKeyOperation{ + JSONWebKeyOperationDecrypt, + JSONWebKeyOperationEncrypt, + JSONWebKeyOperationExport, + JSONWebKeyOperationImport, + JSONWebKeyOperationSign, + JSONWebKeyOperationUnwrapKey, + JSONWebKeyOperationVerify, + JSONWebKeyOperationWrapKey, + } +} + +// JSONWebKeySignatureAlgorithm - The signing/verification algorithm identifier. For more information on possible algorithm +// types, see JsonWebKeySignatureAlgorithm. +type JSONWebKeySignatureAlgorithm string + +const ( + // JSONWebKeySignatureAlgorithmES256 - ECDSA using P-256 and SHA-256, as described in https://tools.ietf.org/html/rfc7518. 
+	JSONWebKeySignatureAlgorithmES256 JSONWebKeySignatureAlgorithm = "ES256"
+	// JSONWebKeySignatureAlgorithmES256K - ECDSA using P-256K and SHA-256, as described in https://tools.ietf.org/html/rfc7518
+	JSONWebKeySignatureAlgorithmES256K JSONWebKeySignatureAlgorithm = "ES256K"
+	// JSONWebKeySignatureAlgorithmES384 - ECDSA using P-384 and SHA-384, as described in https://tools.ietf.org/html/rfc7518
+	JSONWebKeySignatureAlgorithmES384 JSONWebKeySignatureAlgorithm = "ES384"
+	// JSONWebKeySignatureAlgorithmES512 - ECDSA using P-521 and SHA-512, as described in https://tools.ietf.org/html/rfc7518
+	JSONWebKeySignatureAlgorithmES512 JSONWebKeySignatureAlgorithm = "ES512"
+	// JSONWebKeySignatureAlgorithmPS256 - RSASSA-PSS using SHA-256 and MGF1 with SHA-256, as described in https://tools.ietf.org/html/rfc7518
+	JSONWebKeySignatureAlgorithmPS256 JSONWebKeySignatureAlgorithm = "PS256"
+	// JSONWebKeySignatureAlgorithmPS384 - RSASSA-PSS using SHA-384 and MGF1 with SHA-384, as described in https://tools.ietf.org/html/rfc7518
+	JSONWebKeySignatureAlgorithmPS384 JSONWebKeySignatureAlgorithm = "PS384"
+	// JSONWebKeySignatureAlgorithmPS512 - RSASSA-PSS using SHA-512 and MGF1 with SHA-512, as described in https://tools.ietf.org/html/rfc7518
+	JSONWebKeySignatureAlgorithmPS512 JSONWebKeySignatureAlgorithm = "PS512"
+	// JSONWebKeySignatureAlgorithmRS256 - RSASSA-PKCS1-v1_5 using SHA-256, as described in https://tools.ietf.org/html/rfc7518
+	JSONWebKeySignatureAlgorithmRS256 JSONWebKeySignatureAlgorithm = "RS256"
+	// JSONWebKeySignatureAlgorithmRS384 - RSASSA-PKCS1-v1_5 using SHA-384, as described in https://tools.ietf.org/html/rfc7518
+	JSONWebKeySignatureAlgorithmRS384 JSONWebKeySignatureAlgorithm = "RS384"
+	// JSONWebKeySignatureAlgorithmRS512 - RSASSA-PKCS1-v1_5 using SHA-512, as described in https://tools.ietf.org/html/rfc7518
+	JSONWebKeySignatureAlgorithmRS512 JSONWebKeySignatureAlgorithm = "RS512"
+	// JSONWebKeySignatureAlgorithmRSNULL - Reserved
+	JSONWebKeySignatureAlgorithmRSNULL JSONWebKeySignatureAlgorithm = "RSNULL"
+)
+
+// PossibleJSONWebKeySignatureAlgorithmValues returns the possible values for the JSONWebKeySignatureAlgorithm const type.
+func PossibleJSONWebKeySignatureAlgorithmValues() []JSONWebKeySignatureAlgorithm {
+	return []JSONWebKeySignatureAlgorithm{
+		JSONWebKeySignatureAlgorithmES256,
+		JSONWebKeySignatureAlgorithmES256K,
+		JSONWebKeySignatureAlgorithmES384,
+		JSONWebKeySignatureAlgorithmES512,
+		JSONWebKeySignatureAlgorithmPS256,
+		JSONWebKeySignatureAlgorithmPS384,
+		JSONWebKeySignatureAlgorithmPS512,
+		JSONWebKeySignatureAlgorithmRS256,
+		JSONWebKeySignatureAlgorithmRS384,
+		JSONWebKeySignatureAlgorithmRS512,
+		JSONWebKeySignatureAlgorithmRSNULL,
+	}
+}
+
+// JSONWebKeyType - JsonWebKey Key Type (kty), as defined in https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40.
+type JSONWebKeyType string
+
+const (
+	// JSONWebKeyTypeEC - Elliptic Curve.
+	JSONWebKeyTypeEC JSONWebKeyType = "EC"
+	// JSONWebKeyTypeECHSM - Elliptic Curve with a private key which is stored in the HSM.
+	JSONWebKeyTypeECHSM JSONWebKeyType = "EC-HSM"
+	// JSONWebKeyTypeOct - Octet sequence (used to represent symmetric keys)
+	JSONWebKeyTypeOct JSONWebKeyType = "oct"
+	// JSONWebKeyTypeOctHSM - Octet sequence (used to represent symmetric keys) which is stored in the HSM.
+	JSONWebKeyTypeOctHSM JSONWebKeyType = "oct-HSM"
+	// JSONWebKeyTypeRSA - RSA (https://tools.ietf.org/html/rfc3447)
+	JSONWebKeyTypeRSA JSONWebKeyType = "RSA"
+	// JSONWebKeyTypeRSAHSM - RSA with a private key which is stored in the HSM.
+	JSONWebKeyTypeRSAHSM JSONWebKeyType = "RSA-HSM"
+)
+
+// PossibleJSONWebKeyTypeValues returns the possible values for the JSONWebKeyType const type.
+func PossibleJSONWebKeyTypeValues() []JSONWebKeyType {
+	return []JSONWebKeyType{
+		JSONWebKeyTypeEC,
+		JSONWebKeyTypeECHSM,
+		JSONWebKeyTypeOct,
+		JSONWebKeyTypeOctHSM,
+		JSONWebKeyTypeRSA,
+		JSONWebKeyTypeRSAHSM,
+	}
+}
+
+// KeyEncryptionAlgorithm - The encryption algorithm to use to protect the exported key material
+type KeyEncryptionAlgorithm string
+
+const (
+	KeyEncryptionAlgorithmCKMRSAAESKEYWRAP KeyEncryptionAlgorithm = "CKM_RSA_AES_KEY_WRAP"
+	KeyEncryptionAlgorithmRSAAESKEYWRAP256 KeyEncryptionAlgorithm = "RSA_AES_KEY_WRAP_256"
+	KeyEncryptionAlgorithmRSAAESKEYWRAP384 KeyEncryptionAlgorithm = "RSA_AES_KEY_WRAP_384"
+)
+
+// PossibleKeyEncryptionAlgorithmValues returns the possible values for the KeyEncryptionAlgorithm const type.
+func PossibleKeyEncryptionAlgorithmValues() []KeyEncryptionAlgorithm {
+	return []KeyEncryptionAlgorithm{
+		KeyEncryptionAlgorithmCKMRSAAESKEYWRAP,
+		KeyEncryptionAlgorithmRSAAESKEYWRAP256,
+		KeyEncryptionAlgorithmRSAAESKEYWRAP384,
+	}
+}
+
+// KeyRotationPolicyAction - The type of the action.
+type KeyRotationPolicyAction string
+
+const (
+	// KeyRotationPolicyActionRotate - Rotate the key based on the key policy.
+	KeyRotationPolicyActionRotate KeyRotationPolicyAction = "rotate"
+	// KeyRotationPolicyActionNotify - Trigger event grid events. For preview, the notification time is not configurable and it
+	// defaults to 30 days before expiry.
+	KeyRotationPolicyActionNotify KeyRotationPolicyAction = "notify"
+)
+
+// PossibleKeyRotationPolicyActionValues returns the possible values for the KeyRotationPolicyAction const type.
+func PossibleKeyRotationPolicyActionValues() []KeyRotationPolicyAction {
+	return []KeyRotationPolicyAction{
+		KeyRotationPolicyActionRotate,
+		KeyRotationPolicyActionNotify,
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/custom_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/custom_client.go
new file mode 100644
index 00000000000..e9624667596
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/custom_client.go
@@ -0,0 +1,63 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package azkeys
+
+// this file contains handwritten additions to the generated code
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal"
+)
+
+// ClientOptions contains optional settings for Client.
+type ClientOptions struct {
+	azcore.ClientOptions
+
+	// DisableChallengeResourceVerification controls whether the policy requires the
+	// authentication challenge resource to match the Key Vault or Managed HSM domain.
+	// See https://aka.ms/azsdk/blog/vault-uri for more information.
+	DisableChallengeResourceVerification bool
+}
+
+// NewClient creates a client that accesses a Key Vault's keys.
You should validate that vaultURL +// references a valid Key Vault or Managed HSM. See https://aka.ms/azsdk/blog/vault-uri for details. +func NewClient(vaultURL string, credential azcore.TokenCredential, options *ClientOptions) (*Client, error) { + if options == nil { + options = &ClientOptions{} + } + authPolicy := internal.NewKeyVaultChallengePolicy( + credential, + &internal.KeyVaultChallengePolicyOptions{ + DisableChallengeResourceVerification: options.DisableChallengeResourceVerification, + }, + ) + azcoreClient, err := azcore.NewClient("azkeys.Client", version, runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}, &options.ClientOptions) + if err != nil { + return nil, err + } + return &Client{endpoint: vaultURL, internal: azcoreClient}, nil +} + +// ID is a key's unique ID, containing its version, if any, and name. +type ID string + +// Name of the key. +func (i *ID) Name() string { + _, name, _ := internal.ParseID((*string)(i)) + return *name +} + +// Version of the key. This returns an empty string when the ID contains no version. +func (i *ID) Version() string { + _, _, version := internal.ParseID((*string)(i)) + if version == nil { + return "" + } + return *version +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/models.go new file mode 100644 index 00000000000..7561561ad08 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/models.go @@ -0,0 +1,549 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azkeys + +import "time" + +// BackupKeyResult - The backup key result, containing the backup blob. +type BackupKeyResult struct { + // READ-ONLY; The backup blob containing the backed up key. + Value []byte `json:"value,omitempty" azure:"ro"` +} + +// BackupKeyOptions contains the optional parameters for the Client.BackupKey method. +type BackupKeyOptions struct { + // placeholder for future optional parameters +} + +// CreateKeyOptions contains the optional parameters for the Client.CreateKey method. +type CreateKeyOptions struct { + // placeholder for future optional parameters +} + +// DecryptOptions contains the optional parameters for the Client.Decrypt method. +type DecryptOptions struct { + // placeholder for future optional parameters +} + +// DeleteKeyOptions contains the optional parameters for the Client.DeleteKey method. +type DeleteKeyOptions struct { + // placeholder for future optional parameters +} + +// EncryptOptions contains the optional parameters for the Client.Encrypt method. +type EncryptOptions struct { + // placeholder for future optional parameters +} + +// GetDeletedKeyOptions contains the optional parameters for the Client.GetDeletedKey method. +type GetDeletedKeyOptions struct { + // placeholder for future optional parameters +} + +// GetKeyOptions contains the optional parameters for the Client.GetKey method. +type GetKeyOptions struct { + // placeholder for future optional parameters +} + +// GetKeyRotationPolicyOptions contains the optional parameters for the Client.GetKeyRotationPolicy method. 
+type GetKeyRotationPolicyOptions struct { + // placeholder for future optional parameters +} + +// GetRandomBytesOptions contains the optional parameters for the Client.GetRandomBytes method. +type GetRandomBytesOptions struct { + // placeholder for future optional parameters +} + +// ImportKeyOptions contains the optional parameters for the Client.ImportKey method. +type ImportKeyOptions struct { + // placeholder for future optional parameters +} + +// ListDeletedKeysOptions contains the optional parameters for the Client.NewListDeletedKeysPager method. +type ListDeletedKeysOptions struct { + // Maximum number of results to return in a page. If not specified the service will return up to 25 results. + MaxResults *int32 +} + +// ListKeyVersionsOptions contains the optional parameters for the Client.NewListKeyVersionsPager method. +type ListKeyVersionsOptions struct { + // Maximum number of results to return in a page. If not specified the service will return up to 25 results. + MaxResults *int32 +} + +// ListKeysOptions contains the optional parameters for the Client.NewListKeysPager method. +type ListKeysOptions struct { + // Maximum number of results to return in a page. If not specified the service will return up to 25 results. + MaxResults *int32 +} + +// PurgeDeletedKeyOptions contains the optional parameters for the Client.PurgeDeletedKey method. +type PurgeDeletedKeyOptions struct { + // placeholder for future optional parameters +} + +// RecoverDeletedKeyOptions contains the optional parameters for the Client.RecoverDeletedKey method. +type RecoverDeletedKeyOptions struct { + // placeholder for future optional parameters +} + +// ReleaseOptions contains the optional parameters for the Client.Release method. +type ReleaseOptions struct { + // placeholder for future optional parameters +} + +// RestoreKeyOptions contains the optional parameters for the Client.RestoreKey method. +type RestoreKeyOptions struct { + // placeholder for future optional parameters +} + +// RotateKeyOptions contains the optional parameters for the Client.RotateKey method. +type RotateKeyOptions struct { + // placeholder for future optional parameters +} + +// SignOptions contains the optional parameters for the Client.Sign method. +type SignOptions struct { + // placeholder for future optional parameters +} + +// UnwrapKeyOptions contains the optional parameters for the Client.UnwrapKey method. +type UnwrapKeyOptions struct { + // placeholder for future optional parameters +} + +// UpdateKeyOptions contains the optional parameters for the Client.UpdateKey method. +type UpdateKeyOptions struct { + // placeholder for future optional parameters +} + +// UpdateKeyRotationPolicyOptions contains the optional parameters for the Client.UpdateKeyRotationPolicy method. +type UpdateKeyRotationPolicyOptions struct { + // placeholder for future optional parameters +} + +// VerifyOptions contains the optional parameters for the Client.Verify method. +type VerifyOptions struct { + // placeholder for future optional parameters +} + +// WrapKeyOptions contains the optional parameters for the Client.WrapKey method. +type WrapKeyOptions struct { + // placeholder for future optional parameters +} + +// CreateKeyParameters - The key create parameters. +type CreateKeyParameters struct { + // REQUIRED; The type of key to create. For valid values, see JsonWebKeyType. + Kty *JSONWebKeyType `json:"kty,omitempty"` + + // Elliptic curve name. For valid values, see JsonWebKeyCurveName. 
+	Curve *JSONWebKeyCurveName `json:"crv,omitempty"`
+
+	// The attributes of a key managed by the key vault service.
+	KeyAttributes *KeyAttributes `json:"attributes,omitempty"`
+	KeyOps []*JSONWebKeyOperation `json:"key_ops,omitempty"`
+
+	// The key size in bits. For example: 2048, 3072, or 4096 for RSA.
+	KeySize *int32 `json:"key_size,omitempty"`
+
+	// The public exponent for an RSA key.
+	PublicExponent *int32 `json:"public_exponent,omitempty"`
+
+	// The policy rules under which the key can be exported.
+	ReleasePolicy *KeyReleasePolicy `json:"release_policy,omitempty"`
+
+	// Application specific metadata in the form of key-value pairs.
+	Tags map[string]*string `json:"tags,omitempty"`
+}
+
+// DeletedKeyBundle - A DeletedKeyBundle consisting of a WebKey plus its Attributes and deletion info
+type DeletedKeyBundle struct {
+	// The key management attributes.
+	Attributes *KeyAttributes `json:"attributes,omitempty"`
+
+	// The Json web key.
+	Key *JSONWebKey `json:"key,omitempty"`
+
+	// The url of the recovery object, used to identify and recover the deleted key.
+	RecoveryID *string `json:"recoveryId,omitempty"`
+
+	// The policy rules under which the key can be exported.
+	ReleasePolicy *KeyReleasePolicy `json:"release_policy,omitempty"`
+
+	// Application specific metadata in the form of key-value pairs.
+	Tags map[string]*string `json:"tags,omitempty"`
+
+	// READ-ONLY; The time when the key was deleted, in UTC
+	DeletedDate *time.Time `json:"deletedDate,omitempty" azure:"ro"`
+
+	// READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will
+	// be true.
+	Managed *bool `json:"managed,omitempty" azure:"ro"`
+
+	// READ-ONLY; The time when the key is scheduled to be purged, in UTC
+	ScheduledPurgeDate *time.Time `json:"scheduledPurgeDate,omitempty" azure:"ro"`
+}
+
+// DeletedKeyItem - The deleted key item containing the deleted key metadata and information about deletion.
+type DeletedKeyItem struct {
+	// The key management attributes.
+	Attributes *KeyAttributes `json:"attributes,omitempty"`
+
+	// Key identifier.
+	KID *ID `json:"kid,omitempty"`
+
+	// The url of the recovery object, used to identify and recover the deleted key.
+	RecoveryID *string `json:"recoveryId,omitempty"`
+
+	// Application specific metadata in the form of key-value pairs.
+	Tags map[string]*string `json:"tags,omitempty"`
+
+	// READ-ONLY; The time when the key was deleted, in UTC
+	DeletedDate *time.Time `json:"deletedDate,omitempty" azure:"ro"`
+
+	// READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will
+	// be true.
+	Managed *bool `json:"managed,omitempty" azure:"ro"`
+
+	// READ-ONLY; The time when the key is scheduled to be purged, in UTC
+	ScheduledPurgeDate *time.Time `json:"scheduledPurgeDate,omitempty" azure:"ro"`
+}
+
+// DeletedKeyListResult - A list of keys that have been deleted in this vault.
+type DeletedKeyListResult struct {
+	// READ-ONLY; The URL to get the next set of deleted keys.
+	NextLink *string `json:"nextLink,omitempty" azure:"ro"`
+
+	// READ-ONLY; A response message containing a list of deleted keys in the vault along with a link to the next page of deleted
+	// keys
+	Value []*DeletedKeyItem `json:"value,omitempty" azure:"ro"`
+}
+
+// GetRandomBytesRequest - The get random bytes request object.
+type GetRandomBytesRequest struct {
+	// REQUIRED; The requested number of random bytes.
+ Count *int32 `json:"count,omitempty"` +} + +// ImportKeyParameters - The key import parameters. +type ImportKeyParameters struct { + // REQUIRED; The Json web key + Key *JSONWebKey `json:"key,omitempty"` + + // Whether to import as a hardware key (HSM) or software key. + HSM *bool `json:"Hsm,omitempty"` + + // The key management attributes. + KeyAttributes *KeyAttributes `json:"attributes,omitempty"` + + // The policy rules under which the key can be exported. + ReleasePolicy *KeyReleasePolicy `json:"release_policy,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` +} + +// JSONWebKey - As of http://tools.ietf.org/html/draft-ietf-jose-json-web-key-18 +type JSONWebKey struct { + // Elliptic curve name. For valid values, see JsonWebKeyCurveName. + Crv *JSONWebKeyCurveName `json:"crv,omitempty"` + + // RSA private exponent, or the D component of an EC private key. + D []byte `json:"d,omitempty"` + + // RSA private key parameter. + DP []byte `json:"dp,omitempty"` + + // RSA private key parameter. + DQ []byte `json:"dq,omitempty"` + + // RSA public exponent. + E []byte `json:"e,omitempty"` + + // Symmetric key. + K []byte `json:"k,omitempty"` + + // Key identifier. + KID *ID `json:"kid,omitempty"` + KeyOps []*string `json:"key_ops,omitempty"` + + // JsonWebKey Key Type (kty), as defined in https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40. + Kty *JSONWebKeyType `json:"kty,omitempty"` + + // RSA modulus. + N []byte `json:"n,omitempty"` + + // RSA secret prime. + P []byte `json:"p,omitempty"` + + // RSA secret prime, with p < q. + Q []byte `json:"q,omitempty"` + + // RSA private key parameter. + QI []byte `json:"qi,omitempty"` + + // Protected Key, used with 'Bring Your Own Key'. + T []byte `json:"key_hsm,omitempty"` + + // X component of an EC public key. + X []byte `json:"x,omitempty"` + + // Y component of an EC public key. + Y []byte `json:"y,omitempty"` +} + +// KeyAttributes - The attributes of a key managed by the key vault service. +type KeyAttributes struct { + // Determines whether the object is enabled. + Enabled *bool `json:"enabled,omitempty"` + + // Expiry date in UTC. + Expires *time.Time `json:"exp,omitempty"` + + // Indicates if the private key can be exported. Release policy must be provided when creating the first version of an exportable + // key. + Exportable *bool `json:"exportable,omitempty"` + + // Not before date in UTC. + NotBefore *time.Time `json:"nbf,omitempty"` + + // READ-ONLY; Creation time in UTC. + Created *time.Time `json:"created,omitempty" azure:"ro"` + + // READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. + RecoverableDays *int32 `json:"recoverableDays,omitempty" azure:"ro"` + + // READ-ONLY; Reflects the deletion recovery level currently in effect for keys in the current vault. If it contains 'Purgeable' + // the key can be permanently deleted by a privileged user; otherwise, only the system + // can purge the key, at the end of the retention interval. + RecoveryLevel *DeletionRecoveryLevel `json:"recoveryLevel,omitempty" azure:"ro"` + + // READ-ONLY; Last updated time in UTC. + Updated *time.Time `json:"updated,omitempty" azure:"ro"` +} + +// KeyBundle - A KeyBundle consisting of a WebKey plus its attributes. +type KeyBundle struct { + // The key management attributes. + Attributes *KeyAttributes `json:"attributes,omitempty"` + + // The Json web key. 
+ Key *JSONWebKey `json:"key,omitempty"` + + // The policy rules under which the key can be exported. + ReleasePolicy *KeyReleasePolicy `json:"release_policy,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will + // be true. + Managed *bool `json:"managed,omitempty" azure:"ro"` +} + +// KeyItem - The key item containing key metadata. +type KeyItem struct { + // The key management attributes. + Attributes *KeyAttributes `json:"attributes,omitempty"` + + // Key identifier. + KID *ID `json:"kid,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will + // be true. + Managed *bool `json:"managed,omitempty" azure:"ro"` +} + +// KeyListResult - The key list result. +type KeyListResult struct { + // READ-ONLY; The URL to get the next set of keys. + NextLink *string `json:"nextLink,omitempty" azure:"ro"` + + // READ-ONLY; A response message containing a list of keys in the key vault along with a link to the next page of keys. + Value []*KeyItem `json:"value,omitempty" azure:"ro"` +} + +// KeyOperationResult - The key operation result. +type KeyOperationResult struct { + // READ-ONLY + AdditionalAuthenticatedData []byte `json:"aad,omitempty" azure:"ro"` + + // READ-ONLY + AuthenticationTag []byte `json:"tag,omitempty" azure:"ro"` + + // READ-ONLY + IV []byte `json:"iv,omitempty" azure:"ro"` + + // READ-ONLY; Key identifier + KID *ID `json:"kid,omitempty" azure:"ro"` + + // READ-ONLY + Result []byte `json:"value,omitempty" azure:"ro"` +} + +// KeyOperationsParameters - The key operations parameters. +type KeyOperationsParameters struct { + // REQUIRED; algorithm identifier + Algorithm *JSONWebKeyEncryptionAlgorithm `json:"alg,omitempty"` + + // REQUIRED + Value []byte `json:"value,omitempty"` + + // Additional data to authenticate but not encrypt/decrypt when using authenticated crypto algorithms. + AAD []byte `json:"aad,omitempty"` + + // Cryptographically random, non-repeating initialization vector for symmetric algorithms. + IV []byte `json:"iv,omitempty"` + + // The tag to authenticate when performing decryption with an authenticated algorithm. + Tag []byte `json:"tag,omitempty"` +} + +// KeyReleasePolicy - The policy rules under which the key can be exported. +type KeyReleasePolicy struct { + // Content type and version of key release policy + ContentType *string `json:"contentType,omitempty"` + + // Blob encoding the policy rules under which the key can be released. Blob must be base64 URL encoded. + EncodedPolicy []byte `json:"data,omitempty"` + + // Defines the mutability state of the policy. Once marked immutable, this flag cannot be reset and the policy cannot be changed + // under any circumstances. + Immutable *bool `json:"immutable,omitempty"` +} + +// KeyReleaseResult - The release result, containing the released key. +type KeyReleaseResult struct { + // READ-ONLY; A signed object containing the released key. + Value *string `json:"value,omitempty" azure:"ro"` +} + +// KeyRotationPolicy - Management policy for a key. +type KeyRotationPolicy struct { + // The key rotation policy attributes. 
+	Attributes *KeyRotationPolicyAttributes `json:"attributes,omitempty"`
+
+	// Actions that will be performed by Key Vault over the lifetime of a key. For preview, lifetimeActions can only have two
+	// items at maximum: one for rotate, one for notify. Notification time would
+	// default to 30 days before expiry and it is not configurable.
+	LifetimeActions []*LifetimeActions `json:"lifetimeActions,omitempty"`
+
+	// READ-ONLY; The key policy id.
+	ID *string `json:"id,omitempty" azure:"ro"`
+}
+
+// KeyRotationPolicyAttributes - The key rotation policy attributes.
+type KeyRotationPolicyAttributes struct {
+	// The expiryTime will be applied on the new key version. It should be at least 28 days. It will be in ISO 8601 Format. Examples:
+	// 90 days: P90D, 3 months: P3M, 48 hours: PT48H, 1 year and 10 days: P1Y10D
+	ExpiryTime *string `json:"expiryTime,omitempty"`
+
+	// READ-ONLY; The key rotation policy created time in UTC.
+	Created *time.Time `json:"created,omitempty" azure:"ro"`
+
+	// READ-ONLY; The key rotation policy's last updated time in UTC.
+	Updated *time.Time `json:"updated,omitempty" azure:"ro"`
+}
+
+// KeyVerifyResult - The key verify result.
+type KeyVerifyResult struct {
+	// READ-ONLY; True if the signature is verified, otherwise false.
+	Value *bool `json:"value,omitempty" azure:"ro"`
+}
+
+// LifetimeActions - Action and its trigger that will be performed by Key Vault over the lifetime of a key.
+type LifetimeActions struct {
+	// The action that will be executed.
+	Action *LifetimeActionsType `json:"action,omitempty"`
+
+	// The condition that will execute the action.
+	Trigger *LifetimeActionsTrigger `json:"trigger,omitempty"`
+}
+
+// LifetimeActionsTrigger - A condition to be satisfied for an action to be executed.
+type LifetimeActionsTrigger struct {
+	// Time after creation to attempt to rotate. It only applies to rotate. It will be in ISO 8601 duration format. Example: 90
+	// days : "P90D"
+	TimeAfterCreate *string `json:"timeAfterCreate,omitempty"`
+
+	// Time before expiry to attempt to rotate or notify. It will be in ISO 8601 duration format. Example: 90 days : "P90D"
+	TimeBeforeExpiry *string `json:"timeBeforeExpiry,omitempty"`
+}
+
+// LifetimeActionsType - The action that will be executed.
+type LifetimeActionsType struct {
+	// The type of the action.
+	Type *KeyRotationPolicyAction `json:"type,omitempty"`
+}
+
+// RandomBytes - The get random bytes response object containing the bytes.
+type RandomBytes struct {
+	// REQUIRED; The bytes encoded as a base64url string.
+	Value []byte `json:"value,omitempty"`
+}
+
+// ReleaseParameters - The release key parameters.
+type ReleaseParameters struct {
+	// REQUIRED; The attestation assertion for the target of the key release.
+	TargetAttestationToken *string `json:"target,omitempty"`
+
+	// The encryption algorithm to use to protect the exported key material
+	Enc *KeyEncryptionAlgorithm `json:"enc,omitempty"`
+
+	// A client provided nonce for freshness.
+	Nonce *string `json:"nonce,omitempty"`
+}
+
+// RestoreKeyParameters - The key restore parameters.
+type RestoreKeyParameters struct {
+	// REQUIRED; The backup blob associated with a key bundle.
+	KeyBundleBackup []byte `json:"value,omitempty"`
+}
+
+// SignParameters - The key operations parameters.
+type SignParameters struct {
+	// REQUIRED; The signing/verification algorithm identifier. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm.
+ Algorithm *JSONWebKeySignatureAlgorithm `json:"alg,omitempty"` + + // REQUIRED + Value []byte `json:"value,omitempty"` +} + +// UpdateKeyParameters - The key update parameters. +type UpdateKeyParameters struct { + // The attributes of a key managed by the key vault service. + KeyAttributes *KeyAttributes `json:"attributes,omitempty"` + + // Json web key operations. For more information on possible key operations, see JsonWebKeyOperation. + KeyOps []*JSONWebKeyOperation `json:"key_ops,omitempty"` + + // The policy rules under which the key can be exported. + ReleasePolicy *KeyReleasePolicy `json:"release_policy,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` +} + +// VerifyParameters - The key verify parameters. +type VerifyParameters struct { + // REQUIRED; The signing/verification algorithm. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. + Algorithm *JSONWebKeySignatureAlgorithm `json:"alg,omitempty"` + + // REQUIRED; The digest used for signing. + Digest []byte `json:"digest,omitempty"` + + // REQUIRED; The signature to be verified. + Signature []byte `json:"value,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/models_serde.go new file mode 100644 index 00000000000..dddb1732a67 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/models_serde.go @@ -0,0 +1,1120 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azkeys + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "reflect" +) + +// MarshalJSON implements the json.Marshaller interface for type BackupKeyResult. +func (b BackupKeyResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", b.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BackupKeyResult. +func (b *BackupKeyResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &b.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CreateKeyParameters. 
+func (c CreateKeyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "crv", c.Curve) + populate(objectMap, "attributes", c.KeyAttributes) + populate(objectMap, "key_ops", c.KeyOps) + populate(objectMap, "key_size", c.KeySize) + populate(objectMap, "kty", c.Kty) + populate(objectMap, "public_exponent", c.PublicExponent) + populate(objectMap, "release_policy", c.ReleasePolicy) + populate(objectMap, "tags", c.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CreateKeyParameters. +func (c *CreateKeyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "crv": + err = unpopulate(val, "Curve", &c.Curve) + delete(rawMsg, key) + case "attributes": + err = unpopulate(val, "KeyAttributes", &c.KeyAttributes) + delete(rawMsg, key) + case "key_ops": + err = unpopulate(val, "KeyOps", &c.KeyOps) + delete(rawMsg, key) + case "key_size": + err = unpopulate(val, "KeySize", &c.KeySize) + delete(rawMsg, key) + case "kty": + err = unpopulate(val, "Kty", &c.Kty) + delete(rawMsg, key) + case "public_exponent": + err = unpopulate(val, "PublicExponent", &c.PublicExponent) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &c.ReleasePolicy) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &c.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedKeyBundle. +func (d DeletedKeyBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", d.Attributes) + populateTimeUnix(objectMap, "deletedDate", d.DeletedDate) + populate(objectMap, "key", d.Key) + populate(objectMap, "managed", d.Managed) + populate(objectMap, "recoveryId", d.RecoveryID) + populate(objectMap, "release_policy", d.ReleasePolicy) + populateTimeUnix(objectMap, "scheduledPurgeDate", d.ScheduledPurgeDate) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedKeyBundle. 
+func (d *DeletedKeyBundle) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &d.Attributes) + delete(rawMsg, key) + case "deletedDate": + err = unpopulateTimeUnix(val, "DeletedDate", &d.DeletedDate) + delete(rawMsg, key) + case "key": + err = unpopulate(val, "Key", &d.Key) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &d.Managed) + delete(rawMsg, key) + case "recoveryId": + err = unpopulate(val, "RecoveryID", &d.RecoveryID) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &d.ReleasePolicy) + delete(rawMsg, key) + case "scheduledPurgeDate": + err = unpopulateTimeUnix(val, "ScheduledPurgeDate", &d.ScheduledPurgeDate) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &d.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedKeyItem. +func (d DeletedKeyItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", d.Attributes) + populateTimeUnix(objectMap, "deletedDate", d.DeletedDate) + populate(objectMap, "kid", d.KID) + populate(objectMap, "managed", d.Managed) + populate(objectMap, "recoveryId", d.RecoveryID) + populateTimeUnix(objectMap, "scheduledPurgeDate", d.ScheduledPurgeDate) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedKeyItem. +func (d *DeletedKeyItem) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &d.Attributes) + delete(rawMsg, key) + case "deletedDate": + err = unpopulateTimeUnix(val, "DeletedDate", &d.DeletedDate) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &d.KID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &d.Managed) + delete(rawMsg, key) + case "recoveryId": + err = unpopulate(val, "RecoveryID", &d.RecoveryID) + delete(rawMsg, key) + case "scheduledPurgeDate": + err = unpopulateTimeUnix(val, "ScheduledPurgeDate", &d.ScheduledPurgeDate) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &d.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedKeyListResult. +func (d DeletedKeyListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", d.NextLink) + populate(objectMap, "value", d.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedKeyListResult. 
+func (d *DeletedKeyListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &d.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &d.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GetRandomBytesRequest. +func (g GetRandomBytesRequest) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "count", g.Count) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GetRandomBytesRequest. +func (g *GetRandomBytesRequest) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "count": + err = unpopulate(val, "Count", &g.Count) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImportKeyParameters. +func (i ImportKeyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "Hsm", i.HSM) + populate(objectMap, "key", i.Key) + populate(objectMap, "attributes", i.KeyAttributes) + populate(objectMap, "release_policy", i.ReleasePolicy) + populate(objectMap, "tags", i.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImportKeyParameters. +func (i *ImportKeyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "Hsm": + err = unpopulate(val, "HSM", &i.HSM) + delete(rawMsg, key) + case "key": + err = unpopulate(val, "Key", &i.Key) + delete(rawMsg, key) + case "attributes": + err = unpopulate(val, "KeyAttributes", &i.KeyAttributes) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &i.ReleasePolicy) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &i.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type JSONWebKey. 
+func (j JSONWebKey) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "crv", j.Crv) + populateByteArray(objectMap, "d", j.D, runtime.Base64URLFormat) + populateByteArray(objectMap, "dp", j.DP, runtime.Base64URLFormat) + populateByteArray(objectMap, "dq", j.DQ, runtime.Base64URLFormat) + populateByteArray(objectMap, "e", j.E, runtime.Base64URLFormat) + populateByteArray(objectMap, "k", j.K, runtime.Base64URLFormat) + populate(objectMap, "kid", j.KID) + populate(objectMap, "key_ops", j.KeyOps) + populate(objectMap, "kty", j.Kty) + populateByteArray(objectMap, "n", j.N, runtime.Base64URLFormat) + populateByteArray(objectMap, "p", j.P, runtime.Base64URLFormat) + populateByteArray(objectMap, "q", j.Q, runtime.Base64URLFormat) + populateByteArray(objectMap, "qi", j.QI, runtime.Base64URLFormat) + populateByteArray(objectMap, "key_hsm", j.T, runtime.Base64URLFormat) + populateByteArray(objectMap, "x", j.X, runtime.Base64URLFormat) + populateByteArray(objectMap, "y", j.Y, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type JSONWebKey. +func (j *JSONWebKey) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "crv": + err = unpopulate(val, "Crv", &j.Crv) + delete(rawMsg, key) + case "d": + err = runtime.DecodeByteArray(string(val), &j.D, runtime.Base64URLFormat) + delete(rawMsg, key) + case "dp": + err = runtime.DecodeByteArray(string(val), &j.DP, runtime.Base64URLFormat) + delete(rawMsg, key) + case "dq": + err = runtime.DecodeByteArray(string(val), &j.DQ, runtime.Base64URLFormat) + delete(rawMsg, key) + case "e": + err = runtime.DecodeByteArray(string(val), &j.E, runtime.Base64URLFormat) + delete(rawMsg, key) + case "k": + err = runtime.DecodeByteArray(string(val), &j.K, runtime.Base64URLFormat) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &j.KID) + delete(rawMsg, key) + case "key_ops": + err = unpopulate(val, "KeyOps", &j.KeyOps) + delete(rawMsg, key) + case "kty": + err = unpopulate(val, "Kty", &j.Kty) + delete(rawMsg, key) + case "n": + err = runtime.DecodeByteArray(string(val), &j.N, runtime.Base64URLFormat) + delete(rawMsg, key) + case "p": + err = runtime.DecodeByteArray(string(val), &j.P, runtime.Base64URLFormat) + delete(rawMsg, key) + case "q": + err = runtime.DecodeByteArray(string(val), &j.Q, runtime.Base64URLFormat) + delete(rawMsg, key) + case "qi": + err = runtime.DecodeByteArray(string(val), &j.QI, runtime.Base64URLFormat) + delete(rawMsg, key) + case "key_hsm": + err = runtime.DecodeByteArray(string(val), &j.T, runtime.Base64URLFormat) + delete(rawMsg, key) + case "x": + err = runtime.DecodeByteArray(string(val), &j.X, runtime.Base64URLFormat) + delete(rawMsg, key) + case "y": + err = runtime.DecodeByteArray(string(val), &j.Y, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyAttributes. 
+func (k KeyAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateTimeUnix(objectMap, "created", k.Created) + populate(objectMap, "enabled", k.Enabled) + populateTimeUnix(objectMap, "exp", k.Expires) + populate(objectMap, "exportable", k.Exportable) + populateTimeUnix(objectMap, "nbf", k.NotBefore) + populate(objectMap, "recoverableDays", k.RecoverableDays) + populate(objectMap, "recoveryLevel", k.RecoveryLevel) + populateTimeUnix(objectMap, "updated", k.Updated) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyAttributes. +func (k *KeyAttributes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "created": + err = unpopulateTimeUnix(val, "Created", &k.Created) + delete(rawMsg, key) + case "enabled": + err = unpopulate(val, "Enabled", &k.Enabled) + delete(rawMsg, key) + case "exp": + err = unpopulateTimeUnix(val, "Expires", &k.Expires) + delete(rawMsg, key) + case "exportable": + err = unpopulate(val, "Exportable", &k.Exportable) + delete(rawMsg, key) + case "nbf": + err = unpopulateTimeUnix(val, "NotBefore", &k.NotBefore) + delete(rawMsg, key) + case "recoverableDays": + err = unpopulate(val, "RecoverableDays", &k.RecoverableDays) + delete(rawMsg, key) + case "recoveryLevel": + err = unpopulate(val, "RecoveryLevel", &k.RecoveryLevel) + delete(rawMsg, key) + case "updated": + err = unpopulateTimeUnix(val, "Updated", &k.Updated) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyBundle. +func (k KeyBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", k.Attributes) + populate(objectMap, "key", k.Key) + populate(objectMap, "managed", k.Managed) + populate(objectMap, "release_policy", k.ReleasePolicy) + populate(objectMap, "tags", k.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyBundle. +func (k *KeyBundle) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &k.Attributes) + delete(rawMsg, key) + case "key": + err = unpopulate(val, "Key", &k.Key) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &k.Managed) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &k.ReleasePolicy) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &k.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyItem. 
+func (k KeyItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", k.Attributes) + populate(objectMap, "kid", k.KID) + populate(objectMap, "managed", k.Managed) + populate(objectMap, "tags", k.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyItem. +func (k *KeyItem) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &k.Attributes) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &k.KID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &k.Managed) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &k.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyListResult. +func (k KeyListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", k.NextLink) + populate(objectMap, "value", k.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyListResult. +func (k *KeyListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &k.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &k.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyOperationResult. +func (k KeyOperationResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "aad", k.AdditionalAuthenticatedData, runtime.Base64URLFormat) + populateByteArray(objectMap, "tag", k.AuthenticationTag, runtime.Base64URLFormat) + populateByteArray(objectMap, "iv", k.IV, runtime.Base64URLFormat) + populate(objectMap, "kid", k.KID) + populateByteArray(objectMap, "value", k.Result, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyOperationResult. 
+func (k *KeyOperationResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "aad": + err = runtime.DecodeByteArray(string(val), &k.AdditionalAuthenticatedData, runtime.Base64URLFormat) + delete(rawMsg, key) + case "tag": + err = runtime.DecodeByteArray(string(val), &k.AuthenticationTag, runtime.Base64URLFormat) + delete(rawMsg, key) + case "iv": + err = runtime.DecodeByteArray(string(val), &k.IV, runtime.Base64URLFormat) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &k.KID) + delete(rawMsg, key) + case "value": + err = runtime.DecodeByteArray(string(val), &k.Result, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyOperationsParameters. +func (k KeyOperationsParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "aad", k.AAD, runtime.Base64URLFormat) + populate(objectMap, "alg", k.Algorithm) + populateByteArray(objectMap, "iv", k.IV, runtime.Base64URLFormat) + populateByteArray(objectMap, "tag", k.Tag, runtime.Base64URLFormat) + populateByteArray(objectMap, "value", k.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyOperationsParameters. +func (k *KeyOperationsParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "aad": + err = runtime.DecodeByteArray(string(val), &k.AAD, runtime.Base64URLFormat) + delete(rawMsg, key) + case "alg": + err = unpopulate(val, "Algorithm", &k.Algorithm) + delete(rawMsg, key) + case "iv": + err = runtime.DecodeByteArray(string(val), &k.IV, runtime.Base64URLFormat) + delete(rawMsg, key) + case "tag": + err = runtime.DecodeByteArray(string(val), &k.Tag, runtime.Base64URLFormat) + delete(rawMsg, key) + case "value": + err = runtime.DecodeByteArray(string(val), &k.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyReleasePolicy. +func (k KeyReleasePolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "contentType", k.ContentType) + populateByteArray(objectMap, "data", k.EncodedPolicy, runtime.Base64URLFormat) + populate(objectMap, "immutable", k.Immutable) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyReleasePolicy. 
+func (k *KeyReleasePolicy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "contentType": + err = unpopulate(val, "ContentType", &k.ContentType) + delete(rawMsg, key) + case "data": + err = runtime.DecodeByteArray(string(val), &k.EncodedPolicy, runtime.Base64URLFormat) + delete(rawMsg, key) + case "immutable": + err = unpopulate(val, "Immutable", &k.Immutable) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyReleaseResult. +func (k KeyReleaseResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "value", k.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyReleaseResult. +func (k *KeyReleaseResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = unpopulate(val, "Value", &k.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyRotationPolicy. +func (k KeyRotationPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", k.Attributes) + populate(objectMap, "id", k.ID) + populate(objectMap, "lifetimeActions", k.LifetimeActions) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyRotationPolicy. +func (k *KeyRotationPolicy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &k.Attributes) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &k.ID) + delete(rawMsg, key) + case "lifetimeActions": + err = unpopulate(val, "LifetimeActions", &k.LifetimeActions) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyRotationPolicyAttributes. +func (k KeyRotationPolicyAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateTimeUnix(objectMap, "created", k.Created) + populate(objectMap, "expiryTime", k.ExpiryTime) + populateTimeUnix(objectMap, "updated", k.Updated) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyRotationPolicyAttributes. 
+func (k *KeyRotationPolicyAttributes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "created": + err = unpopulateTimeUnix(val, "Created", &k.Created) + delete(rawMsg, key) + case "expiryTime": + err = unpopulate(val, "ExpiryTime", &k.ExpiryTime) + delete(rawMsg, key) + case "updated": + err = unpopulateTimeUnix(val, "Updated", &k.Updated) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyVerifyResult. +func (k KeyVerifyResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "value", k.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyVerifyResult. +func (k *KeyVerifyResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = unpopulate(val, "Value", &k.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LifetimeActions. +func (l LifetimeActions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "action", l.Action) + populate(objectMap, "trigger", l.Trigger) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LifetimeActions. +func (l *LifetimeActions) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "action": + err = unpopulate(val, "Action", &l.Action) + delete(rawMsg, key) + case "trigger": + err = unpopulate(val, "Trigger", &l.Trigger) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LifetimeActionsTrigger. +func (l LifetimeActionsTrigger) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "timeAfterCreate", l.TimeAfterCreate) + populate(objectMap, "timeBeforeExpiry", l.TimeBeforeExpiry) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LifetimeActionsTrigger. 
+func (l *LifetimeActionsTrigger) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "timeAfterCreate": + err = unpopulate(val, "TimeAfterCreate", &l.TimeAfterCreate) + delete(rawMsg, key) + case "timeBeforeExpiry": + err = unpopulate(val, "TimeBeforeExpiry", &l.TimeBeforeExpiry) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LifetimeActionsType. +func (l LifetimeActionsType) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "type", l.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LifetimeActionsType. +func (l *LifetimeActionsType) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "type": + err = unpopulate(val, "Type", &l.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RandomBytes. +func (r RandomBytes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", r.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RandomBytes. +func (r *RandomBytes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &r.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ReleaseParameters. +func (r ReleaseParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "enc", r.Enc) + populate(objectMap, "nonce", r.Nonce) + populate(objectMap, "target", r.TargetAttestationToken) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ReleaseParameters. +func (r *ReleaseParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enc": + err = unpopulate(val, "Enc", &r.Enc) + delete(rawMsg, key) + case "nonce": + err = unpopulate(val, "Nonce", &r.Nonce) + delete(rawMsg, key) + case "target": + err = unpopulate(val, "TargetAttestationToken", &r.TargetAttestationToken) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RestoreKeyParameters. 
+func (r RestoreKeyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", r.KeyBundleBackup, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RestoreKeyParameters. +func (r *RestoreKeyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &r.KeyBundleBackup, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SignParameters. +func (s SignParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "alg", s.Algorithm) + populateByteArray(objectMap, "value", s.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SignParameters. +func (s *SignParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "alg": + err = unpopulate(val, "Algorithm", &s.Algorithm) + delete(rawMsg, key) + case "value": + err = runtime.DecodeByteArray(string(val), &s.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type UpdateKeyParameters. +func (u UpdateKeyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", u.KeyAttributes) + populate(objectMap, "key_ops", u.KeyOps) + populate(objectMap, "release_policy", u.ReleasePolicy) + populate(objectMap, "tags", u.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UpdateKeyParameters. +func (u *UpdateKeyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "KeyAttributes", &u.KeyAttributes) + delete(rawMsg, key) + case "key_ops": + err = unpopulate(val, "KeyOps", &u.KeyOps) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &u.ReleasePolicy) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &u.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type VerifyParameters. 
+func (v VerifyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "alg", v.Algorithm) + populateByteArray(objectMap, "digest", v.Digest, runtime.Base64URLFormat) + populateByteArray(objectMap, "value", v.Signature, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type VerifyParameters. +func (v *VerifyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "alg": + err = unpopulate(val, "Algorithm", &v.Algorithm) + delete(rawMsg, key) + case "digest": + err = runtime.DecodeByteArray(string(val), &v.Digest, runtime.Base64URLFormat) + delete(rawMsg, key) + case "value": + err = runtime.DecodeByteArray(string(val), &v.Signature, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + } + return nil +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func populateByteArray(m map[string]any, k string, b []byte, f runtime.Base64Encoding) { + if azcore.IsNullValue(b) { + m[k] = nil + } else if len(b) == 0 { + return + } else { + m[k] = runtime.EncodeByteArray(b, f) + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/platform-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/platform-matrix.json new file mode 100644 index 00000000000..51367ad9364 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/platform-matrix.json @@ -0,0 +1,17 @@ +{ + "displayNames": { + "@{ enableHsm = $true }": "HSM" + }, + "include": [ + { + "Agent": { + "ubuntu-20.04": { + "OSVmImage": "MMSUbuntu20.04", + "Pool": "azsdk-pool-mms-ubuntu-2004-general" + } + }, + "ArmTemplateParameters": "@{ enableHsm = $true }", + "GoVersion": ["1.18"] + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/response_types.go new file mode 100644 index 00000000000..62eaefa5e3a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/response_types.go @@ -0,0 +1,130 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azkeys + +// BackupKeyResponse contains the response from method Client.BackupKey. +type BackupKeyResponse struct { + BackupKeyResult +} + +// CreateKeyResponse contains the response from method Client.CreateKey. +type CreateKeyResponse struct { + KeyBundle +} + +// DecryptResponse contains the response from method Client.Decrypt. 
+type DecryptResponse struct { + KeyOperationResult +} + +// DeleteKeyResponse contains the response from method Client.DeleteKey. +type DeleteKeyResponse struct { + DeletedKeyBundle +} + +// EncryptResponse contains the response from method Client.Encrypt. +type EncryptResponse struct { + KeyOperationResult +} + +// GetDeletedKeyResponse contains the response from method Client.GetDeletedKey. +type GetDeletedKeyResponse struct { + DeletedKeyBundle +} + +// GetKeyResponse contains the response from method Client.GetKey. +type GetKeyResponse struct { + KeyBundle +} + +// GetKeyRotationPolicyResponse contains the response from method Client.GetKeyRotationPolicy. +type GetKeyRotationPolicyResponse struct { + KeyRotationPolicy +} + +// GetRandomBytesResponse contains the response from method Client.GetRandomBytes. +type GetRandomBytesResponse struct { + RandomBytes +} + +// ImportKeyResponse contains the response from method Client.ImportKey. +type ImportKeyResponse struct { + KeyBundle +} + +// ListDeletedKeysResponse contains the response from method Client.NewListDeletedKeysPager. +type ListDeletedKeysResponse struct { + DeletedKeyListResult +} + +// ListKeyVersionsResponse contains the response from method Client.NewListKeyVersionsPager. +type ListKeyVersionsResponse struct { + KeyListResult +} + +// ListKeysResponse contains the response from method Client.NewListKeysPager. +type ListKeysResponse struct { + KeyListResult +} + +// PurgeDeletedKeyResponse contains the response from method Client.PurgeDeletedKey. +type PurgeDeletedKeyResponse struct { + // placeholder for future response values +} + +// RecoverDeletedKeyResponse contains the response from method Client.RecoverDeletedKey. +type RecoverDeletedKeyResponse struct { + KeyBundle +} + +// ReleaseResponse contains the response from method Client.Release. +type ReleaseResponse struct { + KeyReleaseResult +} + +// RestoreKeyResponse contains the response from method Client.RestoreKey. +type RestoreKeyResponse struct { + KeyBundle +} + +// RotateKeyResponse contains the response from method Client.RotateKey. +type RotateKeyResponse struct { + KeyBundle +} + +// SignResponse contains the response from method Client.Sign. +type SignResponse struct { + KeyOperationResult +} + +// UnwrapKeyResponse contains the response from method Client.UnwrapKey. +type UnwrapKeyResponse struct { + KeyOperationResult +} + +// UpdateKeyResponse contains the response from method Client.UpdateKey. +type UpdateKeyResponse struct { + KeyBundle +} + +// UpdateKeyRotationPolicyResponse contains the response from method Client.UpdateKeyRotationPolicy. +type UpdateKeyRotationPolicyResponse struct { + KeyRotationPolicy +} + +// VerifyResponse contains the response from method Client.Verify. +type VerifyResponse struct { + KeyVerifyResult +} + +// WrapKeyResponse contains the response from method Client.WrapKey. +type WrapKeyResponse struct { + KeyOperationResult +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/test-resources-post.ps1 new file mode 100644 index 00000000000..80f20c0cbfd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/test-resources-post.ps1 @@ -0,0 +1,118 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# IMPORTANT: Do not invoke this file directly. Please instead run eng/New-TestResources.ps1 from the repository root. 
+ +#Requires -Version 6.0 +#Requires -PSEdition Core + +using namespace System.Security.Cryptography +using namespace System.Security.Cryptography.X509Certificates + +# Use same parameter names as declared in eng/New-TestResources.ps1 (assume validation therein). +[CmdletBinding(SupportsShouldProcess = $true, ConfirmImpact = 'Medium')] +param ( + [Parameter()] + [hashtable] $DeploymentOutputs, + + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). + [Parameter(ValueFromRemainingArguments = $true)] + $RemainingArguments +) + +# By default stop for any error. +if (!$PSBoundParameters.ContainsKey('ErrorAction')) { + $ErrorActionPreference = 'Stop' +} + +function Log($Message) { + Write-Host ('{0} - {1}' -f [DateTime]::Now.ToLongTimeString(), $Message) +} + +function New-X509Certificate2([string] $SubjectName) { + + $rsa = [RSA]::Create(2048) + try { + $req = [CertificateRequest]::new( + [string] $SubjectName, + $rsa, + [HashAlgorithmName]::SHA256, + [RSASignaturePadding]::Pkcs1 + ) + + # TODO: Add any KUs necessary to $req.CertificateExtensions + + $NotBefore = [DateTimeOffset]::Now.AddDays(-1) + $NotAfter = $NotBefore.AddDays(365) + + $req.CreateSelfSigned($NotBefore, $NotAfter) + } + finally { + $rsa.Dispose() + } +} + +function Export-X509Certificate2([string] $Path, [X509Certificate2] $Certificate) { + + $Certificate.Export([X509ContentType]::Pfx) | Set-Content $Path -AsByteStream +} + +function Export-X509Certificate2PEM([string] $Path, [X509Certificate2] $Certificate) { + +@" +-----BEGIN CERTIFICATE----- +$([Convert]::ToBase64String($Certificate.RawData, 'InsertLineBreaks')) +-----END CERTIFICATE----- +"@ > $Path + +} + +# Make sure we deployed a Managed HSM. +if (!$DeploymentOutputs['AZURE_MANAGEDHSM_URL']) { + Log "Managed HSM not deployed; skipping activation" + exit +} + +[Uri] $hsmUrl = $DeploymentOutputs['AZURE_MANAGEDHSM_URL'] +$hsmName = $hsmUrl.Host.Substring(0, $hsmUrl.Host.IndexOf('.')) + +Log 'Creating 3 X509 certificates to activate security domain' +$wrappingFiles = foreach ($i in 0..2) { + $certificate = New-X509Certificate2 "CN=$($hsmUrl.Host)" + + $baseName = "$PSScriptRoot\$hsmName-certificate$i" + Export-X509Certificate2 "$baseName.pfx" $certificate + Export-X509Certificate2PEM "$baseName.cer" $certificate + + Resolve-Path "$baseName.cer" +} + +Log "Downloading security domain from '$hsmUrl'" + +$sdPath = "$PSScriptRoot\$hsmName-security-domain.key" +if (Test-Path $sdpath) { + Log "Deleting old security domain: $sdPath" + Remove-Item $sdPath -Force +} + +Export-AzKeyVaultSecurityDomain -Name $hsmName -Quorum 2 -Certificates $wrappingFiles -OutputPath $sdPath -ErrorAction SilentlyContinue -Verbose +if ( !$? ) { + Write-Host $Error[0].Exception + Write-Error $Error[0] + + exit +} + +Log "Security domain downloaded to '$sdPath'; Managed HSM is now active at '$hsmUrl'" + +# Force a sleep to wait for Managed HSM activation to propagate through Cosmos replication. Issue tracked in Azure DevOps. +Log 'Sleeping for 30 seconds to allow activation to propagate...' 
+Start-Sleep -Seconds 30 + +$testApplicationOid = $DeploymentOutputs['CLIENT_OBJECTID'] + +Log "Creating additional required role assignments for '$testApplicationOid'" +$null = New-AzKeyVaultRoleAssignment -HsmName $hsmName -RoleDefinitionName 'Managed HSM Crypto Officer' -ObjectID $testApplicationOid +$null = New-AzKeyVaultRoleAssignment -HsmName $hsmName -RoleDefinitionName 'Managed HSM Crypto User' -ObjectID $testApplicationOid + +Log "Role assignments created for '$testApplicationOid'" \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/test-resources.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/test-resources.json new file mode 100644 index 00000000000..abc737a73c2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/test-resources.json @@ -0,0 +1,296 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "baseName": { + "type": "string", + "defaultValue": "[resourceGroup().name]", + "metadata": { + "description": "The base resource name." + } + }, + "tenantId": { + "type": "string", + "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", + "metadata": { + "description": "The tenant ID to which the application and resources belong." + } + }, + "testApplicationOid": { + "type": "string", + "metadata": { + "description": "The client OID to grant access to test resources." + } + }, + "provisionerApplicationOid": { + "type": "string", + "metadata": { + "description": "The provisioner OID to grant access to test resources." + } + }, + "location": { + "type": "string", + "defaultValue": "[resourceGroup().location]", + "metadata": { + "description": "The location of the resource. By default, this is the same as the resource group." + } + }, + "hsmLocation": { + "type": "string", + "defaultValue": "uksouth", + "allowedValues": [ + "australiacentral", + "australiaeast", + "canadacentral", + "canadaeast", + "centralindia", + "centralus", + "eastasia", + "eastus", + "eastus2", + "francecentral", + "japaneast", + "koreacentral", + "northcentralus", + "northeurope", + "southafricanorth", + "southcentralus", + "switzerlandnorth", + "switzerlandwest", + "uaenorth", + "uksouth", + "westcentralus", + "westeurope", + "westus", + "westus2", + "westus3" + ], + "metadata": { + "description": "The location of the Managed HSM." + } + }, + "enableHsm": { + "type": "bool", + "defaultValue": false, + "metadata": { + "description": "Whether to enable deployment of Managed HSM. The default is false." + } + }, + "keyVaultSku": { + "type": "string", + "defaultValue": "premium", + "metadata": { + "description": "Key Vault SKU to deploy. The default is 'premium'" + } + }, + "attestationImage": { + "type": "string", + "defaultValue": "keyvault-mock-attestation:latest", + "metadata": { + "description": "The container image name and tag to use for the attestation mock service." 
+ } + } + }, + "variables": { + "attestationFarm": "[concat(parameters('baseName'), 'farm')]", + "attestationSite": "[concat(parameters('baseName'), 'site')]", + "attestationUri": "[concat('DOCKER|azsdkengsys.azurecr.io/', parameters('attestationImage'))]", + "kvApiVersion": "2019-09-01", + "kvName": "[parameters('baseName')]", + "kvAdminDefinitionId": "00482a5a-887f-4fb3-b363-3b7fe8e74483", + "kvAdminAssignmentName": "[guid(resourceGroup().id, variables('kvAdminDefinitionId'), parameters('testApplicationOid'))]", + "hsmApiVersion": "2021-04-01-preview", + "hsmName": "[concat(parameters('baseName'), 'hsm')]", + "mgmtApiVersion": "2019-04-01", + "blobContainerName": "backup", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "encryption": { + "services": { + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + "resources": [ + { + "type": "Microsoft.KeyVault/vaults", + "apiVersion": "[variables('kvApiVersion')]", + "name": "[variables('kvName')]", + "location": "[parameters('location')]", + "properties": { + "sku": { + "family": "A", + "name": "[parameters('keyVaultSku')]" + }, + "tenantId": "[parameters('tenantId')]", + "enabledForDeployment": false, + "enabledForDiskEncryption": false, + "enabledForTemplateDeployment": false, + "enableSoftDelete": true, + "enableRbacAuthorization": true, + "softDeleteRetentionInDays": 7 + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "2020-04-01-preview", + "name": "[variables('kvAdminAssignmentName')]", + "properties": { + "roleDefinitionId": "[resourceId('Microsoft.Authorization/roleDefinitions', variables('kvAdminDefinitionId'))]", + "principalId": "[parameters('testApplicationOid')]", + "scope": "[resourceGroup().id]" + } + }, + { + "type": "Microsoft.KeyVault/managedHSMs", + "apiVersion": "[variables('hsmApiVersion')]", + "name": "[variables('hsmName')]", + "condition": "[parameters('enableHsm')]", + "location": "[parameters('hsmLocation')]", + "sku": { + "family": "B", + "name": "Standard_B1" + }, + "properties": { + "tenantId": "[parameters('tenantId')]", + "initialAdminObjectIds": "[union(array(parameters('testApplicationOid')), array(parameters('provisionerApplicationOid')))]", + "enablePurgeProtection": false, + "enableSoftDelete": true, + "softDeleteRetentionInDays": 7, + "publicNetworkAccess": "Enabled", + "networkAcls": "[variables('networkAcls')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[parameters('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "2019-06-01", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName'))]" + ], + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "properties": { + "cors": { + "corsRules": [] + }, + "deleteRetentionPolicy": { + "enabled": false + } + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices/containers", + "apiVersion": 
"2019-06-01", + "name": "[concat(variables('primaryAccountName'), '/default/', variables('blobContainerName'))]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts/blobServices', variables('primaryAccountName'), 'default')]", + "[resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName'))]" + ], + "properties": { + "publicAccess": "None" + } + }, + { + + "type": "Microsoft.Web/serverfarms", + "apiVersion": "2020-12-01", + "name": "[variables('attestationFarm')]", + "location": "[parameters('location')]", + "kind": "linux", + "sku": { + "name": "B1" + }, + "properties": { + "reserved": true + } + }, + { + + "type": "Microsoft.Web/sites", + "apiVersion": "2020-12-01", + "name": "[variables('attestationSite')]", + "dependsOn": [ + "[resourceId('Microsoft.Web/serverfarms', variables('attestationFarm'))]" + ], + "location": "[parameters('location')]", + "properties": { + "httpsOnly": true, + "serverFarmId": "[resourceId('Microsoft.Web/serverfarms', variables('attestationFarm'))]", + "siteConfig": { + "name": "[variables('attestationSite')]", + "alwaysOn": true, + "linuxFxVersion": "[variables('attestationUri')]", + "appSettings": [ + { + "name": "WEBSITES_ENABLE_APP_SERVICE_STORAGE", + "value": "false" + } + ] + } + } + } + ], + "outputs": { + "AZURE_KEYVAULT_URL": { + "type": "string", + "value": "[reference(variables('kvName')).vaultUri]" + }, + "AZURE_MANAGEDHSM_URL": { + "type": "string", + "condition": "[parameters('enableHsm')]", + "value": "[reference(variables('hsmName')).hsmUri]" + }, + "KEYVAULT_SKU": { + "type": "string", + "value": "[reference(parameters('baseName')).sku.name]" + }, + "CLIENT_OBJECTID": { + "type": "string", + "value": "[parameters('testApplicationOid')]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + "BLOB_PRIMARY_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(variables('primaryAccountName'), variables('mgmtApiVersion')).keys[0].value]" + }, + "BLOB_CONTAINER_NAME" : { + "type": "string", + "value": "[variables('blobContainerName')]" + }, + "AZURE_KEYVAULT_ATTESTATION_URL": { + "type": "string", + "value": "[format('https://{0}/', reference(variables('attestationSite')).defaultHostName)]" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/time_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/time_unix.go new file mode 100644 index 00000000000..077d3ae42d2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/time_unix.go @@ -0,0 +1,62 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package azkeys + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "strings" + "time" +) + +type timeUnix time.Time + +func (t timeUnix) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(t).Unix()) +} + +func (t *timeUnix) UnmarshalJSON(data []byte) error { + var seconds int64 + if err := json.Unmarshal(data, &seconds); err != nil { + return err + } + *t = timeUnix(time.Unix(seconds, 0)) + return nil +} + +func (t timeUnix) String() string { + return fmt.Sprintf("%d", time.Time(t).Unix()) +} + +func populateTimeUnix(m map[string]any, k string, t *time.Time) { + if t == nil { + return + } else if azcore.IsNullValue(t) { + m[k] = nil + return + } else if reflect.ValueOf(t).IsNil() { + return + } + m[k] = (*timeUnix)(t) +} + +func unpopulateTimeUnix(data json.RawMessage, fn string, t **time.Time) error { + if data == nil || strings.EqualFold(string(data), "null") { + return nil + } + var aux timeUnix + if err := json.Unmarshal(data, &aux); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + *t = (*time.Time)(&aux) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/version.go new file mode 100644 index 00000000000..0244e9f14f7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azkeys + +const ( + moduleName = "azkeys" + version = "v0.10.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/CHANGELOG.md new file mode 100644 index 00000000000..95d77dc9521 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/CHANGELOG.md @@ -0,0 +1,54 @@ +# Release History + +## 0.7.1 (2022-11-14) + +### Bugs Fixed +* `KeyVaultChallengePolicy` uses incorrect authentication scope when challenge verification is disabled + +## 0.7.0 (2022-09-20) + +### Breaking Changes +* Added `*KeyVaultChallengePolicyOptions` parameter to `NewKeyVaultChallengePolicy` + +## 0.6.0 (2022-09-12) + +### Breaking Changes +* Verify the challenge resource matches the vault domain. See https://aka.ms/azsdk/blog/vault-uri for more information. +* `ParseID()` no longer appends a trailing slash to vault URLs + +## 0.5.0 (2022-05-12) + +### Breaking Changes +* Removed `ExpiringResource` and its dependencies in favor of shared implementation from `internal/temporal`. + +### Other Changes +* Updated to latest versions of `azcore` and `internal`. + +## 0.4.0 (2022-04-22) + +### Breaking Changes +* Updated `ExpiringResource` and its dependent types to use generics. + +### Other Changes +* Remove reference to `TokenRequestOptions.TenantID` as it's been removed and wasn't working anyways. + +## 0.3.0 (2022-04-04) + +### Features Added +* Adds the `ParseKeyvaultID` function to parse an ID into the Key Vault URL, item name, and item version + +### Breaking Changes +* Updates to azcore v0.23.0 + +## 0.2.1 (2022-01-31) + +### Bugs Fixed +* Avoid retries on terminal failures (#16932) + +## 0.2.0 (2022-01-12) + +### Bugs Fixed +* Fixes a bug with Managed HSMs that prevented correctly authorizing requests. 
+ +## 0.1.0 (2021-11-09) +* This is the initial release of the `internal` library for KeyVault diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/LICENSE.txt new file mode 100644 index 00000000000..d1ca00f20a8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/README.md new file mode 100644 index 00000000000..bd4826705d5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/README.md @@ -0,0 +1,23 @@ +# Key Vault Internal Module for Go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal) + +This module contains shared code for all the Key Vault SDKs, mainly the challenge authentication policy. + +## Contributing +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/challenge_policy.go new file mode 100644 index 00000000000..4cc1e429a64 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/challenge_policy.go @@ -0,0 +1,255 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal + +import ( + "bytes" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +const ( + headerAuthorization = "Authorization" + challengeMatchError = `challenge resource "%s" doesn't match the requested domain. Set DisableChallengeResourceVerification to true in your client options to disable. See https://aka.ms/azsdk/blog/vault-uri for more information` + bearerHeader = "Bearer " +) + +type KeyVaultChallengePolicyOptions struct { + // DisableChallengeResourceVerification controls whether the policy requires the + // authentication challenge resource to match the Key Vault or Managed HSM domain + DisableChallengeResourceVerification bool +} + +type KeyVaultChallengePolicy struct { + // mainResource is the resource to be retrieved using the tenant specified in the credential + mainResource *temporal.Resource[azcore.AccessToken, acquiringResourceState] + cred azcore.TokenCredential + scope *string + tenantID *string + verifyChallengeResource bool +} + +func NewKeyVaultChallengePolicy(cred azcore.TokenCredential, opts *KeyVaultChallengePolicyOptions) *KeyVaultChallengePolicy { + if opts == nil { + opts = &KeyVaultChallengePolicyOptions{} + } + return &KeyVaultChallengePolicy{ + cred: cred, + mainResource: temporal.NewResource(acquire), + verifyChallengeResource: !opts.DisableChallengeResourceVerification, + } +} + +func (k *KeyVaultChallengePolicy) Do(req *policy.Request) (*http.Response, error) { + as := acquiringResourceState{ + p: k, + req: req, + } + + if k.scope == nil || k.tenantID == nil { + // First request, get both to get the token + challengeReq, err := k.getChallengeRequest(*req) + if err != nil { + return nil, err + } + + resp, err := challengeReq.Next() + if err != nil { + return nil, err + } + + if resp.StatusCode > 399 && resp.StatusCode != http.StatusUnauthorized { + // the request failed for some other reason, don't try any further + return resp, nil + } + err = k.findScopeAndTenant(resp, req.Raw()) + if err != nil { + return nil, err + } + } + + tk, err := k.mainResource.Get(as) + if err != nil { + return nil, err + } + + req.Raw().Header.Set( + headerAuthorization, + fmt.Sprintf("%s%s", bearerHeader, tk.Token), + ) + + // send a copy of the request + cloneReq := req.Clone(req.Raw().Context()) + resp, cloneReqErr := cloneReq.Next() + if cloneReqErr != nil { + return nil, cloneReqErr + } + + // If it fails and has a 401, try it with a new token + if resp.StatusCode == 401 { + // Force a new token + k.mainResource.Expire() + + // Find the scope and tenant again in case they have changed + err := 
k.findScopeAndTenant(resp, req.Raw()) + if err != nil { + // Error parsing challenge, doomed to fail. Return + return resp, cloneReqErr + } + + tk, err := k.mainResource.Get(as) + if err != nil { + return resp, err + } + + req.Raw().Header.Set( + headerAuthorization, + bearerHeader+tk.Token, + ) + + // send the original request now + return req.Next() + } + + return resp, err +} + +// parses Tenant ID from auth challenge +// https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000 +func parseTenant(url string) *string { + if url == "" { + return to.Ptr("") + } + parts := strings.Split(url, "/") + tenant := parts[3] + tenant = strings.ReplaceAll(tenant, ",", "") + return &tenant +} + +type challengePolicyError struct { + err error +} + +func (c *challengePolicyError) Error() string { + return c.err.Error() +} + +func (*challengePolicyError) NonRetriable() { + // marker method +} + +func (c *challengePolicyError) Unwrap() error { + return c.err +} + +var _ errorinfo.NonRetriable = (*challengePolicyError)(nil) + +// sets the k.scope and k.tenantID from the WWW-Authenticate header +func (k *KeyVaultChallengePolicy) findScopeAndTenant(resp *http.Response, req *http.Request) error { + authHeader := resp.Header.Get("WWW-Authenticate") + if authHeader == "" { + return &challengePolicyError{err: errors.New("response has no WWW-Authenticate header for challenge authentication")} + } + + // Strip down to auth and resource + // Format is "Bearer authorization=\"\" resource=\"\"" OR + // "Bearer authorization=\"\" scope=\"\" resource=\"\"" + authHeader = strings.ReplaceAll(authHeader, "Bearer ", "") + + parts := strings.Split(authHeader, " ") + + vals := map[string]string{} + for _, part := range parts { + subParts := strings.Split(part, "=") + if len(subParts) == 2 { + stripped := strings.ReplaceAll(subParts[1], "\"", "") + stripped = strings.TrimSuffix(stripped, ",") + vals[subParts[0]] = stripped + } + } + + k.tenantID = parseTenant(vals["authorization"]) + scope := "" + if v, ok := vals["scope"]; ok { + scope = v + } else if v, ok := vals["resource"]; ok { + scope = v + } + if scope == "" { + return &challengePolicyError{err: errors.New("could not find a valid resource in the WWW-Authenticate header")} + } + if k.verifyChallengeResource { + // the challenge resource's host must match the requested vault's host + parsed, err := url.Parse(scope) + if err != nil { + return &challengePolicyError{err: fmt.Errorf(`invalid challenge resource "%s": %v`, scope, err)} + } + if !strings.HasSuffix(req.URL.Host, "."+parsed.Host) { + return &challengePolicyError{err: fmt.Errorf(challengeMatchError, scope)} + } + } + if !strings.HasSuffix(scope, "/.default") { + scope += "/.default" + } + k.scope = &scope + return nil +} + +func (k KeyVaultChallengePolicy) getChallengeRequest(orig policy.Request) (*policy.Request, error) { + req, err := runtime.NewRequest(orig.Raw().Context(), orig.Raw().Method, orig.Raw().URL.String()) + if err != nil { + return nil, &challengePolicyError{err: err} + } + + req.Raw().Header = orig.Raw().Header + req.Raw().Header.Set("Content-Length", "0") + req.Raw().ContentLength = 0 + + copied := orig.Clone(orig.Raw().Context()) + copied.Raw().Body = req.Body() + copied.Raw().ContentLength = 0 + copied.Raw().Header.Set("Content-Length", "0") + err = copied.SetBody(streaming.NopCloser(bytes.NewReader([]byte{})), "application/json") + if err != nil { + return nil, &challengePolicyError{err: err} + } + copied.Raw().Header.Del("Content-Type") + + return copied, err +} + +type 
acquiringResourceState struct { + req *policy.Request + p *KeyVaultChallengePolicy +} + +// acquire acquires or updates the resource; only one +// thread/goroutine at a time ever calls this function +func acquire(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken( + state.req.Raw().Context(), + policy.TokenRequestOptions{ + Scopes: []string{*state.p.scope}, + }, + ) + if err != nil { + return azcore.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/ci.keyvault.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/ci.keyvault.yml new file mode 100644 index 00000000000..d72c650135e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/ci.keyvault.yml @@ -0,0 +1,28 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/keyvault/internal + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/keyvault/internal + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'keyvault/internal' + RunLiveTests: false diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/constants.go new file mode 100644 index 00000000000..cd94eb0d834 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/constants.go @@ -0,0 +1,11 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal + +const ( + version = "v0.7.1" //nolint +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/doc.go new file mode 100644 index 00000000000..d8f93492f51 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/parse.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/parse.go new file mode 100644 index 00000000000..8511832d27c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/parse.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+package internal + +import ( + "fmt" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" +) + +// ParseID parses "https://myvaultname.vault.azure.net/keys/key1053998307/b86c2e6ad9054f4abf69cc185b99aa60" +// into "https://myvaultname.managedhsm.azure.net/", "key1053998307", and "b86c2e6ad9054f4abf69cc185b99aa60" +func ParseID(id *string) (*string, *string, *string) { + if id == nil { + return nil, nil, nil + } + parsed, err := url.Parse(*id) + if err != nil { + return nil, nil, nil + } + + url := fmt.Sprintf("%s://%s", parsed.Scheme, parsed.Host) + split := strings.Split(strings.TrimPrefix(parsed.Path, "/"), "/") + if len(split) < 3 { + if len(split) == 2 { + return &url, to.Ptr(split[1]), nil + } + return &url, nil, nil + } + + return &url, to.Ptr(split[1]), to.Ptr(split[2]) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/auth/auth.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/auth/auth.go deleted file mode 100644 index 1f183448209..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/auth/auth.go +++ /dev/null @@ -1,65 +0,0 @@ -package auth - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -import ( - "os" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/azure/auth" -) - -// NewAuthorizerFromEnvironment creates a keyvault dataplane Authorizer configured from environment variables in the order: -// 1. Client credentials -// 2. Client certificate -// 3. Username password -// 4. MSI -func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) { - res, err := getResource() - if err != nil { - return nil, err - } - return auth.NewAuthorizerFromEnvironmentWithResource(res) -} - -// NewAuthorizerFromFile creates a keyvault dataplane Authorizer configured from a configuration file. -// The path to the configuration file must be specified in the AZURE_AUTH_LOCATION environment variable. -func NewAuthorizerFromFile() (autorest.Authorizer, error) { - res, err := getResource() - if err != nil { - return nil, err - } - return auth.NewAuthorizerFromFileWithResource(res) -} - -// NewAuthorizerFromCLI creates a keyvault dataplane Authorizer configured from Azure CLI 2.0 for local development scenarios. -func NewAuthorizerFromCLI() (autorest.Authorizer, error) { - res, err := getResource() - if err != nil { - return nil, err - } - return auth.NewAuthorizerFromCLIWithResource(res) -} - -func getResource() (string, error) { - var env azure.Environment - - if envName := os.Getenv("AZURE_ENVIRONMENT"); envName == "" { - env = azure.PublicCloud - } else { - var err error - env, err = azure.EnvironmentFromName(envName) - if err != nil { - return "", err - } - } - - resource := os.Getenv("AZURE_KEYVAULT_RESOURCE") - if resource == "" { - resource = env.ResourceIdentifiers.KeyVault - } - - return resource, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/CHANGELOG.md deleted file mode 100644 index 6c701c1c40a..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/CHANGELOG.md +++ /dev/null @@ -1,26 +0,0 @@ -# Change History - -## Additive Changes - -### New Funcs - -1. BackupCertificateResult.MarshalJSON() ([]byte, error) -1. 
BackupKeyResult.MarshalJSON() ([]byte, error) -1. BackupSecretResult.MarshalJSON() ([]byte, error) -1. BackupStorageResult.MarshalJSON() ([]byte, error) -1. CertificateIssuerListResult.MarshalJSON() ([]byte, error) -1. CertificateListResult.MarshalJSON() ([]byte, error) -1. DeletedCertificateListResult.MarshalJSON() ([]byte, error) -1. DeletedKeyListResult.MarshalJSON() ([]byte, error) -1. DeletedSasDefinitionListResult.MarshalJSON() ([]byte, error) -1. DeletedSecretListResult.MarshalJSON() ([]byte, error) -1. DeletedStorageListResult.MarshalJSON() ([]byte, error) -1. Error.MarshalJSON() ([]byte, error) -1. ErrorType.MarshalJSON() ([]byte, error) -1. KeyListResult.MarshalJSON() ([]byte, error) -1. KeyOperationResult.MarshalJSON() ([]byte, error) -1. KeyVerifyResult.MarshalJSON() ([]byte, error) -1. PendingCertificateSigningRequestResult.MarshalJSON() ([]byte, error) -1. SasDefinitionListResult.MarshalJSON() ([]byte, error) -1. SecretListResult.MarshalJSON() ([]byte, error) -1. StorageListResult.MarshalJSON() ([]byte, error) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/client.go deleted file mode 100644 index e101ccbd14f..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/client.go +++ /dev/null @@ -1,7313 +0,0 @@ -// Package keyvault implements the Azure ARM Keyvault service API version 7.1. -// -// The key vault client performs cryptographic key operations and vault operations against the Key Vault service. -package keyvault - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// BaseClient is the base client for Keyvault. -type BaseClient struct { - autorest.Client -} - -// New creates an instance of the BaseClient client. -func New() BaseClient { - return NewWithoutDefaults() -} - -// NewWithoutDefaults creates an instance of the BaseClient client. -func NewWithoutDefaults() BaseClient { - return BaseClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - } -} - -// BackupCertificate requests that a backup of the specified certificate be downloaded to the client. All versions of -// the certificate will be downloaded. This operation requires the certificates/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. 
-func (client BaseClient) BackupCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result BackupCertificateResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.BackupCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", resp, "Failure sending request") - return - } - - result, err = client.BackupCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", resp, "Failure responding to request") - return - } - - return -} - -// BackupCertificatePreparer prepares the BackupCertificate request. -func (client BaseClient) BackupCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupCertificateSender sends the BackupCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// BackupCertificateResponder handles the response to the BackupCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupCertificateResponder(resp *http.Response) (result BackupCertificateResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// BackupKey the Key Backup operation exports a key from Azure Key Vault in a protected form. Note that this operation -// does NOT return key material in a form that can be used outside the Azure Key Vault system, the returned key -// material is either protected to a Azure Key Vault HSM or to Azure Key Vault itself. The intent of this operation is -// to allow a client to GENERATE a key in one Azure Key Vault instance, BACKUP the key, and then RESTORE it into -// another Azure Key Vault instance. The BACKUP operation may be used to export, in protected form, any key type from -// Azure Key Vault. Individual versions of a key cannot be backed up. 
BACKUP / RESTORE can be performed within -// geographical boundaries only; meaning that a BACKUP from one geographical area cannot be restored to another -// geographical area. For example, a backup from the US geographical area cannot be restored in an EU geographical -// area. This operation requires the key/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -func (client BaseClient) BackupKey(ctx context.Context, vaultBaseURL string, keyName string) (result BackupKeyResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", nil, "Failure preparing request") - return - } - - resp, err := client.BackupKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", resp, "Failure sending request") - return - } - - result, err = client.BackupKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", resp, "Failure responding to request") - return - } - - return -} - -// BackupKeyPreparer prepares the BackupKey request. -func (client BaseClient) BackupKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupKeySender sends the BackupKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// BackupKeyResponder handles the response to the BackupKey request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupKeyResponder(resp *http.Response) (result BackupKeyResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// BackupSecret requests that a backup of the specified secret be downloaded to the client. All versions of the secret -// will be downloaded. This operation requires the secrets/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. 
-func (client BaseClient) BackupSecret(ctx context.Context, vaultBaseURL string, secretName string) (result BackupSecretResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", nil, "Failure preparing request") - return - } - - resp, err := client.BackupSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", resp, "Failure sending request") - return - } - - result, err = client.BackupSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", resp, "Failure responding to request") - return - } - - return -} - -// BackupSecretPreparer prepares the BackupSecret request. -func (client BaseClient) BackupSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupSecretSender sends the BackupSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// BackupSecretResponder handles the response to the BackupSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupSecretResponder(resp *http.Response) (result BackupSecretResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// BackupStorageAccount requests that a backup of the specified storage account be downloaded to the client. This -// operation requires the storage/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
-func (client BaseClient) BackupStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result BackupStorageResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.BackupStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.BackupStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// BackupStorageAccountPreparer prepares the BackupStorageAccount request. -func (client BaseClient) BackupStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupStorageAccountSender sends the BackupStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// BackupStorageAccountResponder handles the response to the BackupStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupStorageAccountResponder(resp *http.Response) (result BackupStorageResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateCertificate if this is the first version, the certificate resource is created. This operation requires the -// certificates/create permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// parameters - the parameters to create a certificate. 
-func (client BaseClient) CreateCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateCreateParameters) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CreateCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: certificateName, - Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.CertificatePolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}}}, - }}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "CreateCertificate", err.Error()) - } - - req, err := client.CreateCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", resp, "Failure sending request") - return - } - - result, err = client.CreateCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", resp, "Failure responding to request") - return - } - - return -} - -// CreateCertificatePreparer prepares the CreateCertificate request. -func (client BaseClient) CreateCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/create", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateCertificateSender sends the CreateCertificate request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) CreateCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// CreateCertificateResponder handles the response to the CreateCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) CreateCertificateResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateKey the create key operation can be used to create any key type in Azure Key Vault. If the named key already -// exists, Azure Key Vault creates a new version of the key. It requires the keys/create permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name for the new key. The system will generate the version name for the new key. -// parameters - the parameters to create a key. -func (client BaseClient) CreateKey(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyCreateParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CreateKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: keyName, - Constraints: []validation.Constraint{{Target: "keyName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "CreateKey", err.Error()) - } - - req, err := client.CreateKeyPreparer(ctx, vaultBaseURL, keyName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", nil, "Failure preparing request") - return - } - - resp, err := client.CreateKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", resp, "Failure sending request") - return - } - - result, err = client.CreateKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", resp, "Failure responding to request") - return - } - - return -} - -// CreateKeyPreparer prepares the CreateKey request. -func (client BaseClient) CreateKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/create", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateKeySender sends the CreateKey request. 
The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) CreateKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// CreateKeyResponder handles the response to the CreateKey request. The method always -// closes the http.Response Body. -func (client BaseClient) CreateKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Decrypt the DECRYPT operation decrypts a well-formed block of ciphertext using the target encryption key and -// specified algorithm. This operation is the reverse of the ENCRYPT operation; only a single block of data may be -// decrypted, the size of this block is dependent on the target key and the algorithm to be used. The DECRYPT operation -// applies to asymmetric and symmetric keys stored in Azure Key Vault since it uses the private portion of the key. -// This operation requires the keys/decrypt permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the decryption operation. -func (client BaseClient) Decrypt(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Decrypt") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Decrypt", err.Error()) - } - - req, err := client.DecryptPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", nil, "Failure preparing request") - return - } - - resp, err := client.DecryptSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", resp, "Failure sending request") - return - } - - result, err = client.DecryptResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", resp, "Failure responding to request") - return - } - - return -} - -// DecryptPreparer prepares the Decrypt request. 
-func (client BaseClient) DecryptPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/decrypt", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DecryptSender sends the Decrypt request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DecryptSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DecryptResponder handles the response to the Decrypt request. The method always -// closes the http.Response Body. -func (client BaseClient) DecryptResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificate deletes all versions of a certificate object along with its associated policy. Delete certificate -// cannot be used to remove individual versions of a certificate object. This operation requires the -// certificates/delete permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -func (client BaseClient) DeleteCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result DeletedCertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", resp, "Failure responding to request") - return - } - - return -} - -// DeleteCertificatePreparer prepares the DeleteCertificate request. 
-func (client BaseClient) DeleteCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateSender sends the DeleteCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteCertificateResponder handles the response to the DeleteCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateResponder(resp *http.Response) (result DeletedCertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificateContacts deletes the certificate contacts for a specified key vault certificate. This operation -// requires the certificates/managecontacts permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -func (client BaseClient) DeleteCertificateContacts(ctx context.Context, vaultBaseURL string) (result Contacts, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateContacts") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificateContactsPreparer(ctx, vaultBaseURL) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateContactsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateContactsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", resp, "Failure responding to request") - return - } - - return -} - -// DeleteCertificateContactsPreparer prepares the DeleteCertificateContacts request. 
-func (client BaseClient) DeleteCertificateContactsPreparer(ctx context.Context, vaultBaseURL string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/contacts"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateContactsSender sends the DeleteCertificateContacts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateContactsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteCertificateContactsResponder handles the response to the DeleteCertificateContacts request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificateIssuer the DeleteCertificateIssuer operation permanently removes the specified certificate issuer -// from the vault. This operation requires the certificates/manageissuers/deleteissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -func (client BaseClient) DeleteCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", resp, "Failure responding to request") - return - } - - return -} - -// DeleteCertificateIssuerPreparer prepares the DeleteCertificateIssuer request. 
-func (client BaseClient) DeleteCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateIssuerSender sends the DeleteCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteCertificateIssuerResponder handles the response to the DeleteCertificateIssuer request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificateOperation deletes the creation operation for a specified certificate that is in the process of -// being created. The certificate is no longer created. This operation requires the certificates/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -func (client BaseClient) DeleteCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateOperation") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificateOperationPreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateOperationSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateOperationResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", resp, "Failure responding to request") - return - } - - return -} - -// DeleteCertificateOperationPreparer prepares the DeleteCertificateOperation request. 
-func (client BaseClient) DeleteCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateOperationSender sends the DeleteCertificateOperation request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteCertificateOperationResponder handles the response to the DeleteCertificateOperation request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteKey the delete key operation cannot be used to remove individual versions of a key. This operation removes the -// cryptographic material associated with the key, which means the key is not usable for Sign/Verify, Wrap/Unwrap or -// Encrypt/Decrypt operations. This operation requires the keys/delete permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key to delete. -func (client BaseClient) DeleteKey(ctx context.Context, vaultBaseURL string, keyName string) (result DeletedKeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", resp, "Failure sending request") - return - } - - result, err = client.DeleteKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", resp, "Failure responding to request") - return - } - - return -} - -// DeleteKeyPreparer prepares the DeleteKey request. 
-func (client BaseClient) DeleteKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteKeySender sends the DeleteKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteKeyResponder handles the response to the DeleteKey request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteKeyResponder(resp *http.Response) (result DeletedKeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteSasDefinition deletes a SAS definition from a specified storage account. This operation requires the -// storage/deletesas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. 
-func (client BaseClient) DeleteSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result DeletedSasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "DeleteSasDefinition", err.Error()) - } - - req, err := client.DeleteSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.DeleteSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// DeleteSasDefinitionPreparer prepares the DeleteSasDefinition request. -func (client BaseClient) DeleteSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSasDefinitionSender sends the DeleteSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteSasDefinitionResponder handles the response to the DeleteSasDefinition request. The method always -// closes the http.Response Body. 
-func (client BaseClient) DeleteSasDefinitionResponder(resp *http.Response) (result DeletedSasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteSecret the DELETE operation applies to any secret stored in Azure Key Vault. DELETE cannot be applied to an -// individual version of a secret. This operation requires the secrets/delete permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -func (client BaseClient) DeleteSecret(ctx context.Context, vaultBaseURL string, secretName string) (result DeletedSecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", resp, "Failure sending request") - return - } - - result, err = client.DeleteSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", resp, "Failure responding to request") - return - } - - return -} - -// DeleteSecretPreparer prepares the DeleteSecret request. -func (client BaseClient) DeleteSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSecretSender sends the DeleteSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteSecretResponder handles the response to the DeleteSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteSecretResponder(resp *http.Response) (result DeletedSecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteStorageAccount deletes a storage account. This operation requires the storage/delete permission. 
-// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -func (client BaseClient) DeleteStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result DeletedStorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "DeleteStorageAccount", err.Error()) - } - - req, err := client.DeleteStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.DeleteStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// DeleteStorageAccountPreparer prepares the DeleteStorageAccount request. -func (client BaseClient) DeleteStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteStorageAccountSender sends the DeleteStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteStorageAccountResponder handles the response to the DeleteStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteStorageAccountResponder(resp *http.Response) (result DeletedStorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Encrypt the ENCRYPT operation encrypts an arbitrary sequence of bytes using an encryption key that is stored in -// Azure Key Vault. 
Note that the ENCRYPT operation only supports a single block of data, the size of which is -// dependent on the target key and the encryption algorithm to be used. The ENCRYPT operation is only strictly -// necessary for symmetric keys stored in Azure Key Vault since protection with an asymmetric key can be performed -// using public portion of the key. This operation is supported for asymmetric keys as a convenience for callers that -// have a key-reference but do not have access to the public key material. This operation requires the keys/encrypt -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the encryption operation. -func (client BaseClient) Encrypt(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Encrypt") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Encrypt", err.Error()) - } - - req, err := client.EncryptPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", nil, "Failure preparing request") - return - } - - resp, err := client.EncryptSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", resp, "Failure sending request") - return - } - - result, err = client.EncryptResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", resp, "Failure responding to request") - return - } - - return -} - -// EncryptPreparer prepares the Encrypt request. -func (client BaseClient) EncryptPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/encrypt", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// EncryptSender sends the Encrypt request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) EncryptSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// EncryptResponder handles the response to the Encrypt request. The method always -// closes the http.Response Body. -func (client BaseClient) EncryptResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificate gets information about a specific certificate. This operation requires the certificates/get -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in the given vault. -// certificateVersion - the version of the certificate. This URI fragment is optional. If not specified, the -// latest version of the certificate is returned. -func (client BaseClient) GetCertificate(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificatePreparer(ctx, vaultBaseURL, certificateName, certificateVersion) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificatePreparer prepares the GetCertificate request. -func (client BaseClient) GetCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - "certificate-version": autorest.Encode("path", certificateVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/{certificate-version}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateSender sends the GetCertificate request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) GetCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateResponder handles the response to the GetCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificateContacts the GetCertificateContacts operation returns the set of certificate contact resources in the -// specified key vault. This operation requires the certificates/managecontacts permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -func (client BaseClient) GetCertificateContacts(ctx context.Context, vaultBaseURL string) (result Contacts, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateContacts") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificateContactsPreparer(ctx, vaultBaseURL) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateContactsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateContactsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificateContactsPreparer prepares the GetCertificateContacts request. -func (client BaseClient) GetCertificateContactsPreparer(ctx context.Context, vaultBaseURL string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/contacts"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateContactsSender sends the GetCertificateContacts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateContactsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateContactsResponder handles the response to the GetCertificateContacts request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificateIssuer the GetCertificateIssuer operation returns the specified certificate issuer resources in the -// specified key vault. This operation requires the certificates/manageissuers/getissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -func (client BaseClient) GetCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificateIssuerPreparer prepares the GetCertificateIssuer request. -func (client BaseClient) GetCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateIssuerSender sends the GetCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateIssuerResponder handles the response to the GetCertificateIssuer request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificateIssuers the GetCertificateIssuers operation returns the set of certificate issuer resources in the -// specified key vault. This operation requires the certificates/manageissuers/getissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -func (client BaseClient) GetCertificateIssuers(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateIssuerListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuers") - defer func() { - sc := -1 - if result.cilr.Response.Response != nil { - sc = result.cilr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetCertificateIssuers", err.Error()) - } - - result.fn = client.getCertificateIssuersNextResults - req, err := client.GetCertificateIssuersPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateIssuersSender(req) - if err != nil { - result.cilr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", resp, "Failure sending request") - return - } - - result.cilr, err = client.GetCertificateIssuersResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", resp, "Failure responding to request") - return - } - if result.cilr.hasNextLink() && result.cilr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetCertificateIssuersPreparer prepares the GetCertificateIssuers request. -func (client BaseClient) GetCertificateIssuersPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/issuers"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateIssuersSender sends the GetCertificateIssuers request. 
The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateIssuersSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateIssuersResponder handles the response to the GetCertificateIssuers request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificateIssuersResponder(resp *http.Response) (result CertificateIssuerListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getCertificateIssuersNextResults retrieves the next set of results, if any. -func (client BaseClient) getCertificateIssuersNextResults(ctx context.Context, lastResults CertificateIssuerListResult) (result CertificateIssuerListResult, err error) { - req, err := lastResults.certificateIssuerListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetCertificateIssuersSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", resp, "Failure sending next results request") - } - result, err = client.GetCertificateIssuersResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetCertificateIssuersComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetCertificateIssuersComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateIssuerListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuers") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetCertificateIssuers(ctx, vaultBaseURL, maxresults) - return -} - -// GetCertificateOperation gets the creation operation associated with a specified certificate. This operation requires -// the certificates/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. 
-func (client BaseClient) GetCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateOperation") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificateOperationPreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateOperationSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateOperationResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificateOperationPreparer prepares the GetCertificateOperation request. -func (client BaseClient) GetCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateOperationSender sends the GetCertificateOperation request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateOperationResponder handles the response to the GetCertificateOperation request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificatePolicy the GetCertificatePolicy operation returns the specified certificate policy resources in the -// specified key vault. This operation requires the certificates/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in a given key vault. 
-func (client BaseClient) GetCertificatePolicy(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificatePolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificatePolicy") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificatePolicyPreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificatePolicySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", resp, "Failure sending request") - return - } - - result, err = client.GetCertificatePolicyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificatePolicyPreparer prepares the GetCertificatePolicy request. -func (client BaseClient) GetCertificatePolicyPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/policy", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificatePolicySender sends the GetCertificatePolicy request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificatePolicySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificatePolicyResponder handles the response to the GetCertificatePolicy request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificatePolicyResponder(resp *http.Response) (result CertificatePolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificates the GetCertificates operation returns the set of certificates resources in the specified key vault. -// This operation requires the certificates/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -// includePending - specifies whether to include certificates which are not completely provisioned. 
-func (client BaseClient) GetCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result CertificateListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificates") - defer func() { - sc := -1 - if result.clr.Response.Response != nil { - sc = result.clr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetCertificates", err.Error()) - } - - result.fn = client.getCertificatesNextResults - req, err := client.GetCertificatesPreparer(ctx, vaultBaseURL, maxresults, includePending) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificatesSender(req) - if err != nil { - result.clr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", resp, "Failure sending request") - return - } - - result.clr, err = client.GetCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", resp, "Failure responding to request") - return - } - if result.clr.hasNextLink() && result.clr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetCertificatesPreparer prepares the GetCertificates request. -func (client BaseClient) GetCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - if includePending != nil { - queryParameters["includePending"] = autorest.Encode("query", *includePending) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificatesSender sends the GetCertificates request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificatesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificatesResponder handles the response to the GetCertificates request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetCertificatesResponder(resp *http.Response) (result CertificateListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getCertificatesNextResults retrieves the next set of results, if any. -func (client BaseClient) getCertificatesNextResults(ctx context.Context, lastResults CertificateListResult) (result CertificateListResult, err error) { - req, err := lastResults.certificateListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetCertificatesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", resp, "Failure sending next results request") - } - result, err = client.GetCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetCertificatesComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result CertificateListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificates") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetCertificates(ctx, vaultBaseURL, maxresults, includePending) - return -} - -// GetCertificateVersions the GetCertificateVersions operation returns the versions of a certificate in the specified -// key vault. This operation requires the certificates/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetCertificateVersions(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (result CertificateListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateVersions") - defer func() { - sc := -1 - if result.clr.Response.Response != nil { - sc = result.clr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetCertificateVersions", err.Error()) - } - - result.fn = client.getCertificateVersionsNextResults - req, err := client.GetCertificateVersionsPreparer(ctx, vaultBaseURL, certificateName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateVersionsSender(req) - if err != nil { - result.clr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", resp, "Failure sending request") - return - } - - result.clr, err = client.GetCertificateVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", resp, "Failure responding to request") - return - } - if result.clr.hasNextLink() && result.clr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetCertificateVersionsPreparer prepares the GetCertificateVersions request. -func (client BaseClient) GetCertificateVersionsPreparer(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/versions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateVersionsSender sends the GetCertificateVersions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateVersionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateVersionsResponder handles the response to the GetCertificateVersions request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetCertificateVersionsResponder(resp *http.Response) (result CertificateListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getCertificateVersionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getCertificateVersionsNextResults(ctx context.Context, lastResults CertificateListResult) (result CertificateListResult, err error) { - req, err := lastResults.certificateListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetCertificateVersionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetCertificateVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetCertificateVersionsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetCertificateVersionsComplete(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (result CertificateListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateVersions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetCertificateVersions(ctx, vaultBaseURL, certificateName, maxresults) - return -} - -// GetDeletedCertificate the GetDeletedCertificate operation retrieves the deleted certificate information plus its -// attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This -// operation requires the certificates/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. 
-// certificateName - the name of the certificate -func (client BaseClient) GetDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result DeletedCertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", resp, "Failure responding to request") - return - } - - return -} - -// GetDeletedCertificatePreparer prepares the GetDeletedCertificate request. -func (client BaseClient) GetDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedcertificates/{certificate-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedCertificateSender sends the GetDeletedCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedCertificateResponder handles the response to the GetDeletedCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedCertificateResponder(resp *http.Response) (result DeletedCertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedCertificates the GetDeletedCertificates operation retrieves the certificates in the current vault which -// are in a deleted state and ready for recovery or purging. This operation includes deletion-specific information. -// This operation requires the certificates/get/list permission. This operation can only be enabled on soft-delete -// enabled vaults. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-// includePending - specifies whether to include certificates which are not completely provisioned. -func (client BaseClient) GetDeletedCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result DeletedCertificateListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificates") - defer func() { - sc := -1 - if result.dclr.Response.Response != nil { - sc = result.dclr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedCertificates", err.Error()) - } - - result.fn = client.getDeletedCertificatesNextResults - req, err := client.GetDeletedCertificatesPreparer(ctx, vaultBaseURL, maxresults, includePending) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedCertificatesSender(req) - if err != nil { - result.dclr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", resp, "Failure sending request") - return - } - - result.dclr, err = client.GetDeletedCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", resp, "Failure responding to request") - return - } - if result.dclr.hasNextLink() && result.dclr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetDeletedCertificatesPreparer prepares the GetDeletedCertificates request. -func (client BaseClient) GetDeletedCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - if includePending != nil { - queryParameters["includePending"] = autorest.Encode("query", *includePending) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/deletedcertificates"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedCertificatesSender sends the GetDeletedCertificates request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedCertificatesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedCertificatesResponder handles the response to the GetDeletedCertificates request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetDeletedCertificatesResponder(resp *http.Response) (result DeletedCertificateListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedCertificatesNextResults retrieves the next set of results, if any. -func (client BaseClient) getDeletedCertificatesNextResults(ctx context.Context, lastResults DeletedCertificateListResult) (result DeletedCertificateListResult, err error) { - req, err := lastResults.deletedCertificateListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedCertificatesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedCertificatesComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetDeletedCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result DeletedCertificateListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificates") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedCertificates(ctx, vaultBaseURL, maxresults, includePending) - return -} - -// GetDeletedKey the Get Deleted Key operation is applicable for soft-delete enabled vaults. While the operation can be -// invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. This operation requires -// the keys/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. 
-func (client BaseClient) GetDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result DeletedKeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetDeletedKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", resp, "Failure responding to request") - return - } - - return -} - -// GetDeletedKeyPreparer prepares the GetDeletedKey request. -func (client BaseClient) GetDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedkeys/{key-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedKeySender sends the GetDeletedKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedKeyResponder handles the response to the GetDeletedKey request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedKeyResponder(resp *http.Response) (result DeletedKeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedKeys retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public part -// of a deleted key. This operation includes deletion-specific information. The Get Deleted Keys operation is -// applicable for vaults enabled for soft-delete. While the operation can be invoked on any vault, it will return an -// error if invoked on a non soft-delete enabled vault. This operation requires the keys/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetDeletedKeys(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedKeyListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKeys") - defer func() { - sc := -1 - if result.dklr.Response.Response != nil { - sc = result.dklr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedKeys", err.Error()) - } - - result.fn = client.getDeletedKeysNextResults - req, err := client.GetDeletedKeysPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedKeysSender(req) - if err != nil { - result.dklr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", resp, "Failure sending request") - return - } - - result.dklr, err = client.GetDeletedKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", resp, "Failure responding to request") - return - } - if result.dklr.hasNextLink() && result.dklr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetDeletedKeysPreparer prepares the GetDeletedKeys request. -func (client BaseClient) GetDeletedKeysPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/deletedkeys"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedKeysSender sends the GetDeletedKeys request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedKeysResponder handles the response to the GetDeletedKeys request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedKeysResponder(resp *http.Response) (result DeletedKeyListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedKeysNextResults retrieves the next set of results, if any. 
-func (client BaseClient) getDeletedKeysNextResults(ctx context.Context, lastResults DeletedKeyListResult) (result DeletedKeyListResult, err error) { - req, err := lastResults.deletedKeyListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedKeysSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedKeysComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetDeletedKeysComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedKeyListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKeys") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedKeys(ctx, vaultBaseURL, maxresults) - return -} - -// GetDeletedSasDefinition the Get Deleted SAS Definition operation returns the specified deleted SAS definition along -// with its attributes. This operation requires the storage/getsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. 
-func (client BaseClient) GetDeletedSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result DeletedSasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedSasDefinition", err.Error()) - } - - req, err := client.GetDeletedSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// GetDeletedSasDefinitionPreparer prepares the GetDeletedSasDefinition request. -func (client BaseClient) GetDeletedSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedSasDefinitionSender sends the GetDeletedSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedSasDefinitionResponder handles the response to the GetDeletedSasDefinition request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetDeletedSasDefinitionResponder(resp *http.Response) (result DeletedSasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedSasDefinitions the Get Deleted Sas Definitions operation returns the SAS definitions that have been -// deleted for a vault enabled for soft-delete. This operation requires the storage/listsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -func (client BaseClient) GetDeletedSasDefinitions(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result DeletedSasDefinitionListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinitions") - defer func() { - sc := -1 - if result.dsdlr.Response.Response != nil { - sc = result.dsdlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedSasDefinitions", err.Error()) - } - - result.fn = client.getDeletedSasDefinitionsNextResults - req, err := client.GetDeletedSasDefinitionsPreparer(ctx, vaultBaseURL, storageAccountName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedSasDefinitionsSender(req) - if err != nil { - result.dsdlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", resp, "Failure sending request") - return - } - - result.dsdlr, err = client.GetDeletedSasDefinitionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", resp, "Failure responding to request") - return - } - if result.dsdlr.hasNextLink() && result.dsdlr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetDeletedSasDefinitionsPreparer prepares the GetDeletedSasDefinitions request. 
-func (client BaseClient) GetDeletedSasDefinitionsPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedSasDefinitionsSender sends the GetDeletedSasDefinitions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedSasDefinitionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedSasDefinitionsResponder handles the response to the GetDeletedSasDefinitions request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedSasDefinitionsResponder(resp *http.Response) (result DeletedSasDefinitionListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedSasDefinitionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getDeletedSasDefinitionsNextResults(ctx context.Context, lastResults DeletedSasDefinitionListResult) (result DeletedSasDefinitionListResult, err error) { - req, err := lastResults.deletedSasDefinitionListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedSasDefinitionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedSasDefinitionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedSasDefinitionsComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client BaseClient) GetDeletedSasDefinitionsComplete(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result DeletedSasDefinitionListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinitions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedSasDefinitions(ctx, vaultBaseURL, storageAccountName, maxresults) - return -} - -// GetDeletedSecret the Get Deleted Secret operation returns the specified deleted secret along with its attributes. -// This operation requires the secrets/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -func (client BaseClient) GetDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result DeletedSecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetDeletedSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", resp, "Failure responding to request") - return - } - - return -} - -// GetDeletedSecretPreparer prepares the GetDeletedSecret request. -func (client BaseClient) GetDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedsecrets/{secret-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedSecretSender sends the GetDeletedSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedSecretResponder handles the response to the GetDeletedSecret request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetDeletedSecretResponder(resp *http.Response) (result DeletedSecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedSecrets the Get Deleted Secrets operation returns the secrets that have been deleted for a vault enabled -// for soft-delete. This operation requires the secrets/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -func (client BaseClient) GetDeletedSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedSecretListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecrets") - defer func() { - sc := -1 - if result.dslr.Response.Response != nil { - sc = result.dslr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedSecrets", err.Error()) - } - - result.fn = client.getDeletedSecretsNextResults - req, err := client.GetDeletedSecretsPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedSecretsSender(req) - if err != nil { - result.dslr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", resp, "Failure sending request") - return - } - - result.dslr, err = client.GetDeletedSecretsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", resp, "Failure responding to request") - return - } - if result.dslr.hasNextLink() && result.dslr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetDeletedSecretsPreparer prepares the GetDeletedSecrets request. -func (client BaseClient) GetDeletedSecretsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/deletedsecrets"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedSecretsSender sends the GetDeletedSecrets request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) GetDeletedSecretsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedSecretsResponder handles the response to the GetDeletedSecrets request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedSecretsResponder(resp *http.Response) (result DeletedSecretListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedSecretsNextResults retrieves the next set of results, if any. -func (client BaseClient) getDeletedSecretsNextResults(ctx context.Context, lastResults DeletedSecretListResult) (result DeletedSecretListResult, err error) { - req, err := lastResults.deletedSecretListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedSecretsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedSecretsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedSecretsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetDeletedSecretsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedSecretListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecrets") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedSecrets(ctx, vaultBaseURL, maxresults) - return -} - -// GetDeletedStorageAccount the Get Deleted Storage Account operation returns the specified deleted storage account -// along with its attributes. This operation requires the storage/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
-func (client BaseClient) GetDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result DeletedStorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedStorageAccount", err.Error()) - } - - req, err := client.GetDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// GetDeletedStorageAccountPreparer prepares the GetDeletedStorageAccount request. -func (client BaseClient) GetDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedStorageAccountSender sends the GetDeletedStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedStorageAccountResponder handles the response to the GetDeletedStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedStorageAccountResponder(resp *http.Response) (result DeletedStorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedStorageAccounts the Get Deleted Storage Accounts operation returns the storage accounts that have been -// deleted for a vault enabled for soft-delete. This operation requires the storage/list permission. 
-// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -func (client BaseClient) GetDeletedStorageAccounts(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedStorageListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccounts") - defer func() { - sc := -1 - if result.dslr.Response.Response != nil { - sc = result.dslr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedStorageAccounts", err.Error()) - } - - result.fn = client.getDeletedStorageAccountsNextResults - req, err := client.GetDeletedStorageAccountsPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedStorageAccountsSender(req) - if err != nil { - result.dslr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", resp, "Failure sending request") - return - } - - result.dslr, err = client.GetDeletedStorageAccountsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", resp, "Failure responding to request") - return - } - if result.dslr.hasNextLink() && result.dslr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetDeletedStorageAccountsPreparer prepares the GetDeletedStorageAccounts request. -func (client BaseClient) GetDeletedStorageAccountsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/deletedstorage"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedStorageAccountsSender sends the GetDeletedStorageAccounts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedStorageAccountsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedStorageAccountsResponder handles the response to the GetDeletedStorageAccounts request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetDeletedStorageAccountsResponder(resp *http.Response) (result DeletedStorageListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedStorageAccountsNextResults retrieves the next set of results, if any. -func (client BaseClient) getDeletedStorageAccountsNextResults(ctx context.Context, lastResults DeletedStorageListResult) (result DeletedStorageListResult, err error) { - req, err := lastResults.deletedStorageListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedStorageAccountsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedStorageAccountsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedStorageAccountsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetDeletedStorageAccountsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedStorageListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccounts") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedStorageAccounts(ctx, vaultBaseURL, maxresults) - return -} - -// GetKey the get key operation is applicable to all key types. If the requested key is symmetric, then no key material -// is released in the response. This operation requires the keys/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key to get. -// keyVersion - adding the version parameter retrieves a specific version of a key. This URI fragment is -// optional. If not specified, the latest version of the key is returned. 
-func (client BaseClient) GetKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", nil, "Failure preparing request") - return - } - - resp, err := client.GetKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", resp, "Failure sending request") - return - } - - result, err = client.GetKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", resp, "Failure responding to request") - return - } - - return -} - -// GetKeyPreparer prepares the GetKey request. -func (client BaseClient) GetKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetKeySender sends the GetKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetKeyResponder handles the response to the GetKey request. The method always -// closes the http.Response Body. -func (client BaseClient) GetKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetKeys retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public part of a -// stored key. The LIST operation is applicable to all key types, however only the base key identifier, attributes, and -// tags are provided in the response. Individual versions of a key are not listed in the response. This operation -// requires the keys/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetKeys(ctx context.Context, vaultBaseURL string, maxresults *int32) (result KeyListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeys") - defer func() { - sc := -1 - if result.klr.Response.Response != nil { - sc = result.klr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetKeys", err.Error()) - } - - result.fn = client.getKeysNextResults - req, err := client.GetKeysPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", nil, "Failure preparing request") - return - } - - resp, err := client.GetKeysSender(req) - if err != nil { - result.klr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", resp, "Failure sending request") - return - } - - result.klr, err = client.GetKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", resp, "Failure responding to request") - return - } - if result.klr.hasNextLink() && result.klr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetKeysPreparer prepares the GetKeys request. -func (client BaseClient) GetKeysPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/keys"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetKeysSender sends the GetKeys request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetKeysResponder handles the response to the GetKeys request. The method always -// closes the http.Response Body. -func (client BaseClient) GetKeysResponder(resp *http.Response) (result KeyListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getKeysNextResults retrieves the next set of results, if any. 
-func (client BaseClient) getKeysNextResults(ctx context.Context, lastResults KeyListResult) (result KeyListResult, err error) { - req, err := lastResults.keyListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetKeysSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", resp, "Failure sending next results request") - } - result, err = client.GetKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetKeysComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetKeysComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result KeyListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeys") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetKeys(ctx, vaultBaseURL, maxresults) - return -} - -// GetKeyVersions the full key identifier, attributes, and tags are provided in the response. This operation requires -// the keys/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetKeyVersions(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (result KeyListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeyVersions") - defer func() { - sc := -1 - if result.klr.Response.Response != nil { - sc = result.klr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetKeyVersions", err.Error()) - } - - result.fn = client.getKeyVersionsNextResults - req, err := client.GetKeyVersionsPreparer(ctx, vaultBaseURL, keyName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", nil, "Failure preparing request") - return - } - - resp, err := client.GetKeyVersionsSender(req) - if err != nil { - result.klr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", resp, "Failure sending request") - return - } - - result.klr, err = client.GetKeyVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", resp, "Failure responding to request") - return - } - if result.klr.hasNextLink() && result.klr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetKeyVersionsPreparer prepares the GetKeyVersions request. -func (client BaseClient) GetKeyVersionsPreparer(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/versions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetKeyVersionsSender sends the GetKeyVersions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetKeyVersionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetKeyVersionsResponder handles the response to the GetKeyVersions request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetKeyVersionsResponder(resp *http.Response) (result KeyListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getKeyVersionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getKeyVersionsNextResults(ctx context.Context, lastResults KeyListResult) (result KeyListResult, err error) { - req, err := lastResults.keyListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetKeyVersionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetKeyVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetKeyVersionsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetKeyVersionsComplete(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (result KeyListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeyVersions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetKeyVersions(ctx, vaultBaseURL, keyName, maxresults) - return -} - -// GetSasDefinition gets information about a SAS definition for the specified storage account. This operation requires -// the storage/getsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. 
-func (client BaseClient) GetSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetSasDefinition", err.Error()) - } - - req, err := client.GetSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.GetSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.GetSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// GetSasDefinitionPreparer prepares the GetSasDefinition request. -func (client BaseClient) GetSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSasDefinitionSender sends the GetSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetSasDefinitionResponder handles the response to the GetSasDefinition request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetSasDefinitions list storage SAS definitions for the given storage account. This operation requires the -// storage/listsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -func (client BaseClient) GetSasDefinitions(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result SasDefinitionListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinitions") - defer func() { - sc := -1 - if result.sdlr.Response.Response != nil { - sc = result.sdlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetSasDefinitions", err.Error()) - } - - result.fn = client.getSasDefinitionsNextResults - req, err := client.GetSasDefinitionsPreparer(ctx, vaultBaseURL, storageAccountName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", nil, "Failure preparing request") - return - } - - resp, err := client.GetSasDefinitionsSender(req) - if err != nil { - result.sdlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", resp, "Failure sending request") - return - } - - result.sdlr, err = client.GetSasDefinitionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", resp, "Failure responding to request") - return - } - if result.sdlr.hasNextLink() && result.sdlr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetSasDefinitionsPreparer prepares the GetSasDefinitions request. 
-func (client BaseClient) GetSasDefinitionsPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"storage-account-name": autorest.Encode("path", storageAccountName),
-	}
-
-	const APIVersion = "7.1"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-	if maxresults != nil {
-		queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/storage/{storage-account-name}/sas", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSasDefinitionsSender sends the GetSasDefinitions request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetSasDefinitionsSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetSasDefinitionsResponder handles the response to the GetSasDefinitions request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetSasDefinitionsResponder(resp *http.Response) (result SasDefinitionListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// getSasDefinitionsNextResults retrieves the next set of results, if any.
-func (client BaseClient) getSasDefinitionsNextResults(ctx context.Context, lastResults SasDefinitionListResult) (result SasDefinitionListResult, err error) {
-	req, err := lastResults.sasDefinitionListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.GetSasDefinitionsSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.GetSasDefinitionsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// GetSasDefinitionsComplete enumerates all values, automatically crossing page boundaries as required.
-func (client BaseClient) GetSasDefinitionsComplete(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result SasDefinitionListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinitions")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.GetSasDefinitions(ctx, vaultBaseURL, storageAccountName, maxresults)
-	return
-}
-
-// GetSecret the GET operation is applicable to any secret stored in Azure Key Vault. This operation requires the
-// secrets/get permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// secretName - the name of the secret.
-// secretVersion - the version of the secret. This URI fragment is optional. If not specified, the latest
-// version of the secret is returned.
-func (client BaseClient) GetSecret(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string) (result SecretBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecret")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	req, err := client.GetSecretPreparer(ctx, vaultBaseURL, secretName, secretVersion)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetSecretSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.GetSecretResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// GetSecretPreparer prepares the GetSecret request.
-func (client BaseClient) GetSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"secret-name":    autorest.Encode("path", secretName),
-		"secret-version": autorest.Encode("path", secretVersion),
-	}
-
-	const APIVersion = "7.1"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/secrets/{secret-name}/{secret-version}", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSecretSender sends the GetSecret request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetSecretSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetSecretResponder handles the response to the GetSecret request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetSecretResponder(resp *http.Response) (result SecretBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// GetSecrets the Get Secrets operation is applicable to the entire vault. However, only the base secret identifier and
-// its attributes are provided in the response. Individual secret versions are not listed in the response. This
-// operation requires the secrets/list permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// maxresults - maximum number of results to return in a page. If not specified, the service will return up to
-// 25 results.
-func (client BaseClient) GetSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (result SecretListResultPage, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecrets")
-		defer func() {
-			sc := -1
-			if result.slr.Response.Response != nil {
-				sc = result.slr.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: maxresults,
-			Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false,
-				Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil},
-					{Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
-				}}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "GetSecrets", err.Error())
-	}
-
-	result.fn = client.getSecretsNextResults
-	req, err := client.GetSecretsPreparer(ctx, vaultBaseURL, maxresults)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetSecretsSender(req)
-	if err != nil {
-		result.slr.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", resp, "Failure sending request")
-		return
-	}
-
-	result.slr, err = client.GetSecretsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", resp, "Failure responding to request")
-		return
-	}
-	if result.slr.hasNextLink() && result.slr.IsEmpty() {
-		err = result.NextWithContext(ctx)
-		return
-	}
-
-	return
-}
-
-// GetSecretsPreparer prepares the GetSecrets request.
-func (client BaseClient) GetSecretsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	const APIVersion = "7.1"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-	if maxresults != nil {
-		queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPath("/secrets"),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSecretsSender sends the GetSecrets request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetSecretsSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetSecretsResponder handles the response to the GetSecrets request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetSecretsResponder(resp *http.Response) (result SecretListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// getSecretsNextResults retrieves the next set of results, if any.
-func (client BaseClient) getSecretsNextResults(ctx context.Context, lastResults SecretListResult) (result SecretListResult, err error) { - req, err := lastResults.secretListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetSecretsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetSecretsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetSecretsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetSecretsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result SecretListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecrets") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetSecrets(ctx, vaultBaseURL, maxresults) - return -} - -// GetSecretVersions the full secret identifier and attributes are provided in the response. No values are returned for -// the secrets. This operations requires the secrets/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -// maxresults - maximum number of results to return in a page. If not specified, the service will return up to -// 25 results. 
-func (client BaseClient) GetSecretVersions(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (result SecretListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecretVersions") - defer func() { - sc := -1 - if result.slr.Response.Response != nil { - sc = result.slr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetSecretVersions", err.Error()) - } - - result.fn = client.getSecretVersionsNextResults - req, err := client.GetSecretVersionsPreparer(ctx, vaultBaseURL, secretName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", nil, "Failure preparing request") - return - } - - resp, err := client.GetSecretVersionsSender(req) - if err != nil { - result.slr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", resp, "Failure sending request") - return - } - - result.slr, err = client.GetSecretVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", resp, "Failure responding to request") - return - } - if result.slr.hasNextLink() && result.slr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetSecretVersionsPreparer prepares the GetSecretVersions request. -func (client BaseClient) GetSecretVersionsPreparer(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/versions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSecretVersionsSender sends the GetSecretVersions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetSecretVersionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetSecretVersionsResponder handles the response to the GetSecretVersions request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetSecretVersionsResponder(resp *http.Response) (result SecretListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getSecretVersionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getSecretVersionsNextResults(ctx context.Context, lastResults SecretListResult) (result SecretListResult, err error) { - req, err := lastResults.secretListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetSecretVersionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetSecretVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetSecretVersionsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetSecretVersionsComplete(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (result SecretListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecretVersions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetSecretVersions(ctx, vaultBaseURL, secretName, maxresults) - return -} - -// GetStorageAccount gets information about a specified storage account. This operation requires the storage/get -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
-func (client BaseClient) GetStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetStorageAccount", err.Error()) - } - - req, err := client.GetStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.GetStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.GetStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// GetStorageAccountPreparer prepares the GetStorageAccount request. -func (client BaseClient) GetStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetStorageAccountSender sends the GetStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetStorageAccountResponder handles the response to the GetStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) GetStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetStorageAccounts list storage accounts managed by the specified key vault. This operation requires the -// storage/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetStorageAccounts(ctx context.Context, vaultBaseURL string, maxresults *int32) (result StorageListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccounts") - defer func() { - sc := -1 - if result.slr.Response.Response != nil { - sc = result.slr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetStorageAccounts", err.Error()) - } - - result.fn = client.getStorageAccountsNextResults - req, err := client.GetStorageAccountsPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", nil, "Failure preparing request") - return - } - - resp, err := client.GetStorageAccountsSender(req) - if err != nil { - result.slr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", resp, "Failure sending request") - return - } - - result.slr, err = client.GetStorageAccountsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", resp, "Failure responding to request") - return - } - if result.slr.hasNextLink() && result.slr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetStorageAccountsPreparer prepares the GetStorageAccounts request. -func (client BaseClient) GetStorageAccountsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/storage"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetStorageAccountsSender sends the GetStorageAccounts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetStorageAccountsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetStorageAccountsResponder handles the response to the GetStorageAccounts request. The method always -// closes the http.Response Body. -func (client BaseClient) GetStorageAccountsResponder(resp *http.Response) (result StorageListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getStorageAccountsNextResults retrieves the next set of results, if any. 
-func (client BaseClient) getStorageAccountsNextResults(ctx context.Context, lastResults StorageListResult) (result StorageListResult, err error) {
-	req, err := lastResults.storageListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.GetStorageAccountsSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.GetStorageAccountsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// GetStorageAccountsComplete enumerates all values, automatically crossing page boundaries as required.
-func (client BaseClient) GetStorageAccountsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result StorageListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccounts")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.GetStorageAccounts(ctx, vaultBaseURL, maxresults)
-	return
-}
-
-// ImportCertificate imports an existing valid certificate, containing a private key, into Azure Key Vault. The
-// certificate to be imported can be in either PFX or PEM format. If the certificate is in PEM format the PEM file must
-// contain the key as well as x509 certificates. This operation requires the certificates/import permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// certificateName - the name of the certificate.
-// parameters - the parameters to import the certificate.
-func (client BaseClient) ImportCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateImportParameters) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.ImportCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: certificateName, - Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Base64EncodedCertificate", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.CertificatePolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}}}, - }}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "ImportCertificate", err.Error()) - } - - req, err := client.ImportCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.ImportCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", resp, "Failure sending request") - return - } - - result, err = client.ImportCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", resp, "Failure responding to request") - return - } - - return -} - -// ImportCertificatePreparer prepares the ImportCertificate request. -func (client BaseClient) ImportCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateImportParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/import", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ImportCertificateSender sends the ImportCertificate request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) ImportCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ImportCertificateResponder handles the response to the ImportCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) ImportCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ImportKey the import key operation may be used to import any key type into an Azure Key Vault. If the named key -// already exists, Azure Key Vault creates a new version of the key. This operation requires the keys/import -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - name for the imported key. -// parameters - the parameters to import a key. -func (client BaseClient) ImportKey(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyImportParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.ImportKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: keyName, - Constraints: []validation.Constraint{{Target: "keyName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Key", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "ImportKey", err.Error()) - } - - req, err := client.ImportKeyPreparer(ctx, vaultBaseURL, keyName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", nil, "Failure preparing request") - return - } - - resp, err := client.ImportKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", resp, "Failure sending request") - return - } - - result, err = client.ImportKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", resp, "Failure responding to request") - return - } - - return -} - -// ImportKeyPreparer prepares the ImportKey request. 
-func (client BaseClient) ImportKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyImportParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ImportKeySender sends the ImportKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) ImportKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ImportKeyResponder handles the response to the ImportKey request. The method always -// closes the http.Response Body. -func (client BaseClient) ImportKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// MergeCertificate the MergeCertificate operation performs the merging of a certificate or certificate chain with a -// key pair currently available in the service. This operation requires the certificates/create permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// parameters - the parameters to merge certificate. 
-func (client BaseClient) MergeCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateMergeParameters) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.MergeCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.X509Certificates", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "MergeCertificate", err.Error()) - } - - req, err := client.MergeCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.MergeCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", resp, "Failure sending request") - return - } - - result, err = client.MergeCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", resp, "Failure responding to request") - return - } - - return -} - -// MergeCertificatePreparer prepares the MergeCertificate request. -func (client BaseClient) MergeCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateMergeParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending/merge", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// MergeCertificateSender sends the MergeCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) MergeCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// MergeCertificateResponder handles the response to the MergeCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) MergeCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// PurgeDeletedCertificate the PurgeDeletedCertificate operation performs an irreversible deletion of the specified -// certificate, without possibility for recovery. 
The operation is not available if the recovery level does not specify -// 'Purgeable'. This operation requires the certificate/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate -func (client BaseClient) PurgeDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedCertificate") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.PurgeDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedCertificateSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", resp, "Failure responding to request") - return - } - - return -} - -// PurgeDeletedCertificatePreparer prepares the PurgeDeletedCertificate request. -func (client BaseClient) PurgeDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedcertificates/{certificate-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedCertificateSender sends the PurgeDeletedCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// PurgeDeletedCertificateResponder handles the response to the PurgeDeletedCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) PurgeDeletedCertificateResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// PurgeDeletedKey the Purge Deleted Key operation is applicable for soft-delete enabled vaults. While the operation -// can be invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. This operation -// requires the keys/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. 
-// keyName - the name of the key -func (client BaseClient) PurgeDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedKey") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.PurgeDeletedKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedKeySender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", resp, "Failure responding to request") - return - } - - return -} - -// PurgeDeletedKeyPreparer prepares the PurgeDeletedKey request. -func (client BaseClient) PurgeDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedkeys/{key-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedKeySender sends the PurgeDeletedKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// PurgeDeletedKeyResponder handles the response to the PurgeDeletedKey request. The method always -// closes the http.Response Body. -func (client BaseClient) PurgeDeletedKeyResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// PurgeDeletedSecret the purge deleted secret operation removes the secret permanently, without the possibility of -// recovery. This operation can only be enabled on a soft-delete enabled vault. This operation requires the -// secrets/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. 
-func (client BaseClient) PurgeDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedSecret") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.PurgeDeletedSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedSecretSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", resp, "Failure responding to request") - return - } - - return -} - -// PurgeDeletedSecretPreparer prepares the PurgeDeletedSecret request. -func (client BaseClient) PurgeDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedsecrets/{secret-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedSecretSender sends the PurgeDeletedSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// PurgeDeletedSecretResponder handles the response to the PurgeDeletedSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) PurgeDeletedSecretResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// PurgeDeletedStorageAccount the purge deleted storage account operation removes the secret permanently, without the -// possibility of recovery. This operation can only be performed on a soft-delete enabled vault. This operation -// requires the storage/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
-func (client BaseClient) PurgeDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedStorageAccount") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "PurgeDeletedStorageAccount", err.Error()) - } - - req, err := client.PurgeDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedStorageAccountSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// PurgeDeletedStorageAccountPreparer prepares the PurgeDeletedStorageAccount request. -func (client BaseClient) PurgeDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedStorageAccountSender sends the PurgeDeletedStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// PurgeDeletedStorageAccountResponder handles the response to the PurgeDeletedStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) PurgeDeletedStorageAccountResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// RecoverDeletedCertificate the RecoverDeletedCertificate operation performs the reversal of the Delete operation. The -// operation is applicable in vaults enabled for soft-delete, and must be issued during the retention interval -// (available in the deleted certificate's attributes). 
This operation requires the certificates/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the deleted certificate -func (client BaseClient) RecoverDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.RecoverDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedCertificatePreparer prepares the RecoverDeletedCertificate request. -func (client BaseClient) RecoverDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedcertificates/{certificate-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedCertificateSender sends the RecoverDeletedCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedCertificateResponder handles the response to the RecoverDeletedCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) RecoverDeletedCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RecoverDeletedKey the Recover Deleted Key operation is applicable for deleted keys in soft-delete enabled vaults. It -// recovers the deleted key back to its latest version under /keys. An attempt to recover an non-deleted key will -// return an error. Consider this the inverse of the delete operation on soft-delete enabled vaults. 
This operation -// requires the keys/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the deleted key. -func (client BaseClient) RecoverDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.RecoverDeletedKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedKeyPreparer prepares the RecoverDeletedKey request. -func (client BaseClient) RecoverDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedkeys/{key-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedKeySender sends the RecoverDeletedKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedKeyResponder handles the response to the RecoverDeletedKey request. The method always -// closes the http.Response Body. -func (client BaseClient) RecoverDeletedKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RecoverDeletedSasDefinition recovers the deleted SAS definition for the specified storage account. This operation -// can only be performed on a soft-delete enabled vault. This operation requires the storage/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. 
-func (client BaseClient) RecoverDeletedSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RecoverDeletedSasDefinition", err.Error()) - } - - req, err := client.RecoverDeletedSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedSasDefinitionPreparer prepares the RecoverDeletedSasDefinition request. -func (client BaseClient) RecoverDeletedSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas/{sas-definition-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedSasDefinitionSender sends the RecoverDeletedSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedSasDefinitionResponder handles the response to the RecoverDeletedSasDefinition request. The method always -// closes the http.Response Body. 
-func (client BaseClient) RecoverDeletedSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RecoverDeletedSecret recovers the deleted secret in the specified vault. This operation can only be performed on a -// soft-delete enabled vault. This operation requires the secrets/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the deleted secret. -func (client BaseClient) RecoverDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.RecoverDeletedSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedSecretPreparer prepares the RecoverDeletedSecret request. -func (client BaseClient) RecoverDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedsecrets/{secret-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedSecretSender sends the RecoverDeletedSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedSecretResponder handles the response to the RecoverDeletedSecret request. The method always -// closes the http.Response Body. 
-func (client BaseClient) RecoverDeletedSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RecoverDeletedStorageAccount recovers the deleted storage account in the specified vault. This operation can only be -// performed on a soft-delete enabled vault. This operation requires the storage/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -func (client BaseClient) RecoverDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RecoverDeletedStorageAccount", err.Error()) - } - - req, err := client.RecoverDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedStorageAccountPreparer prepares the RecoverDeletedStorageAccount request. -func (client BaseClient) RecoverDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedStorageAccountSender sends the RecoverDeletedStorageAccount request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) RecoverDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedStorageAccountResponder handles the response to the RecoverDeletedStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) RecoverDeletedStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RegenerateStorageAccountKey regenerates the specified key value for the given storage account. This operation -// requires the storage/regeneratekey permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// parameters - the parameters to regenerate storage account key. -func (client BaseClient) RegenerateStorageAccountKey(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountRegenerteKeyParameters) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RegenerateStorageAccountKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RegenerateStorageAccountKey", err.Error()) - } - - req, err := client.RegenerateStorageAccountKeyPreparer(ctx, vaultBaseURL, storageAccountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", nil, "Failure preparing request") - return - } - - resp, err := client.RegenerateStorageAccountKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", resp, "Failure sending request") - return - } - - result, err = client.RegenerateStorageAccountKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", resp, "Failure responding to request") - return - } - - return -} - -// RegenerateStorageAccountKeyPreparer prepares the RegenerateStorageAccountKey request. 
-func (client BaseClient) RegenerateStorageAccountKeyPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountRegenerteKeyParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"storage-account-name": autorest.Encode("path", storageAccountName),
-	}
-
-	const APIVersion = "7.1"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPost(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/storage/{storage-account-name}/regeneratekey", pathParameters),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RegenerateStorageAccountKeySender sends the RegenerateStorageAccountKey request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) RegenerateStorageAccountKeySender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// RegenerateStorageAccountKeyResponder handles the response to the RegenerateStorageAccountKey request. The method always
-// closes the http.Response Body.
-func (client BaseClient) RegenerateStorageAccountKeyResponder(resp *http.Response) (result StorageBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// RestoreCertificate restores a backed up certificate, and all its versions, to a vault. This operation requires the
-// certificates/restore permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// parameters - the parameters to restore the certificate.
-func (client BaseClient) RestoreCertificate(ctx context.Context, vaultBaseURL string, parameters CertificateRestoreParameters) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.CertificateBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RestoreCertificate", err.Error()) - } - - req, err := client.RestoreCertificatePreparer(ctx, vaultBaseURL, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.RestoreCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", resp, "Failure sending request") - return - } - - result, err = client.RestoreCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", resp, "Failure responding to request") - return - } - - return -} - -// RestoreCertificatePreparer prepares the RestoreCertificate request. -func (client BaseClient) RestoreCertificatePreparer(ctx context.Context, vaultBaseURL string, parameters CertificateRestoreParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/restore"), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RestoreCertificateSender sends the RestoreCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RestoreCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RestoreCertificateResponder handles the response to the RestoreCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) RestoreCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RestoreKey imports a previously backed up key into Azure Key Vault, restoring the key, its key identifier, -// attributes and access control policies. The RESTORE operation may be used to import a previously backed up key. -// Individual versions of a key cannot be restored. The key is restored in its entirety with the same key name as it -// had when it was backed up. 
If the key name is not available in the target Key Vault, the RESTORE operation will be -// rejected. While the key name is retained during restore, the final key identifier will change if the key is restored -// to a different vault. Restore will restore all versions and preserve version identifiers. The RESTORE operation is -// subject to security constraints: The target Key Vault must be owned by the same Microsoft Azure Subscription as the -// source Key Vault The user must have RESTORE permission in the target Key Vault. This operation requires the -// keys/restore permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// parameters - the parameters to restore the key. -func (client BaseClient) RestoreKey(ctx context.Context, vaultBaseURL string, parameters KeyRestoreParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.KeyBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RestoreKey", err.Error()) - } - - req, err := client.RestoreKeyPreparer(ctx, vaultBaseURL, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", nil, "Failure preparing request") - return - } - - resp, err := client.RestoreKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", resp, "Failure sending request") - return - } - - result, err = client.RestoreKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", resp, "Failure responding to request") - return - } - - return -} - -// RestoreKeyPreparer prepares the RestoreKey request. -func (client BaseClient) RestoreKeyPreparer(ctx context.Context, vaultBaseURL string, parameters KeyRestoreParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/keys/restore"), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RestoreKeySender sends the RestoreKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RestoreKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RestoreKeyResponder handles the response to the RestoreKey request. The method always -// closes the http.Response Body. 
-func (client BaseClient) RestoreKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RestoreSecret restores a backed up secret, and all its versions, to a vault. This operation requires the -// secrets/restore permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// parameters - the parameters to restore the secret. -func (client BaseClient) RestoreSecret(ctx context.Context, vaultBaseURL string, parameters SecretRestoreParameters) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.SecretBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RestoreSecret", err.Error()) - } - - req, err := client.RestoreSecretPreparer(ctx, vaultBaseURL, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", nil, "Failure preparing request") - return - } - - resp, err := client.RestoreSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", resp, "Failure sending request") - return - } - - result, err = client.RestoreSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", resp, "Failure responding to request") - return - } - - return -} - -// RestoreSecretPreparer prepares the RestoreSecret request. -func (client BaseClient) RestoreSecretPreparer(ctx context.Context, vaultBaseURL string, parameters SecretRestoreParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/secrets/restore"), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RestoreSecretSender sends the RestoreSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RestoreSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RestoreSecretResponder handles the response to the RestoreSecret request. The method always -// closes the http.Response Body. 
-func (client BaseClient) RestoreSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RestoreStorageAccount restores a backed up storage account to a vault. This operation requires the storage/restore -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// parameters - the parameters to restore the storage account. -func (client BaseClient) RestoreStorageAccount(ctx context.Context, vaultBaseURL string, parameters StorageRestoreParameters) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.StorageBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RestoreStorageAccount", err.Error()) - } - - req, err := client.RestoreStorageAccountPreparer(ctx, vaultBaseURL, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.RestoreStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.RestoreStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// RestoreStorageAccountPreparer prepares the RestoreStorageAccount request. -func (client BaseClient) RestoreStorageAccountPreparer(ctx context.Context, vaultBaseURL string, parameters StorageRestoreParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/storage/restore"), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RestoreStorageAccountSender sends the RestoreStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RestoreStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RestoreStorageAccountResponder handles the response to the RestoreStorageAccount request. The method always -// closes the http.Response Body. 
-func (client BaseClient) RestoreStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetCertificateContacts sets the certificate contacts for the specified key vault. This operation requires the -// certificates/managecontacts permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// contacts - the contacts for the key vault certificate. -func (client BaseClient) SetCertificateContacts(ctx context.Context, vaultBaseURL string, contacts Contacts) (result Contacts, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetCertificateContacts") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.SetCertificateContactsPreparer(ctx, vaultBaseURL, contacts) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", nil, "Failure preparing request") - return - } - - resp, err := client.SetCertificateContactsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", resp, "Failure sending request") - return - } - - result, err = client.SetCertificateContactsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", resp, "Failure responding to request") - return - } - - return -} - -// SetCertificateContactsPreparer prepares the SetCertificateContacts request. -func (client BaseClient) SetCertificateContactsPreparer(ctx context.Context, vaultBaseURL string, contacts Contacts) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - contacts.ID = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/contacts"), - autorest.WithJSON(contacts), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetCertificateContactsSender sends the SetCertificateContacts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SetCertificateContactsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// SetCertificateContactsResponder handles the response to the SetCertificateContacts request. The method always -// closes the http.Response Body. 
-func (client BaseClient) SetCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetCertificateIssuer the SetCertificateIssuer operation adds or updates the specified certificate issuer. This -// operation requires the certificates/setissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -// parameter - certificate issuer set parameter. -func (client BaseClient) SetCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerSetParameters) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameter, - Constraints: []validation.Constraint{{Target: "parameter.Provider", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "SetCertificateIssuer", err.Error()) - } - - req, err := client.SetCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName, parameter) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.SetCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.SetCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", resp, "Failure responding to request") - return - } - - return -} - -// SetCertificateIssuerPreparer prepares the SetCertificateIssuer request. -func (client BaseClient) SetCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerSetParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithJSON(parameter), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetCertificateIssuerSender sends the SetCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) SetCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// SetCertificateIssuerResponder handles the response to the SetCertificateIssuer request. The method always -// closes the http.Response Body. -func (client BaseClient) SetCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetSasDefinition creates or updates a new SAS definition for the specified storage account. This operation requires -// the storage/setsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. -// parameters - the parameters to create a SAS definition. -func (client BaseClient) SetSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionCreateParameters) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.TemplateURI", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.ValidityPeriod", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "SetSasDefinition", err.Error()) - } - - req, err := client.SetSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.SetSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.SetSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// SetSasDefinitionPreparer prepares the SetSasDefinition request. 
-func (client BaseClient) SetSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetSasDefinitionSender sends the SetSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SetSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// SetSasDefinitionResponder handles the response to the SetSasDefinition request. The method always -// closes the http.Response Body. -func (client BaseClient) SetSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetSecret the SET operation adds a secret to the Azure Key Vault. If the named secret already exists, Azure Key -// Vault creates a new version of that secret. This operation requires the secrets/set permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -// parameters - the parameters for setting the secret. 
-func (client BaseClient) SetSecret(ctx context.Context, vaultBaseURL string, secretName string, parameters SecretSetParameters) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: secretName, - Constraints: []validation.Constraint{{Target: "secretName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "SetSecret", err.Error()) - } - - req, err := client.SetSecretPreparer(ctx, vaultBaseURL, secretName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", nil, "Failure preparing request") - return - } - - resp, err := client.SetSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", resp, "Failure sending request") - return - } - - result, err = client.SetSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", resp, "Failure responding to request") - return - } - - return -} - -// SetSecretPreparer prepares the SetSecret request. -func (client BaseClient) SetSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, parameters SecretSetParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetSecretSender sends the SetSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SetSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// SetSecretResponder handles the response to the SetSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) SetSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetStorageAccount creates or updates a new storage account. This operation requires the storage/set permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
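The code removed above includes the full autorest-based SetSecret implementation. As a minimal sketch of how that deprecated client was typically driven — the import path is inferred from the `const APIVersion = "7.1"` in the deleted code, and the helper name and the *string type of SecretSetParameters.Value are assumptions, not part of this change:

package kvexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
)

// storeSecret writes a single secret value through the deprecated
// autorest-based client whose SetSecret implementation is removed above.
func storeSecret(ctx context.Context, client keyvault.BaseClient, vaultURL, name, value string) error {
	// SecretSetParameters.Value is assumed to be *string, matching the
	// "parameters.Value" null-check in the deleted validation block.
	params := keyvault.SecretSetParameters{Value: &value}
	_, err := client.SetSecret(ctx, vaultURL, name, params)
	return err
}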
-// parameters - the parameters to create a storage account. -func (client BaseClient) SetStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountCreateParameters) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.ResourceID", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.ActiveKeyName", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.AutoRegenerateKey", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "SetStorageAccount", err.Error()) - } - - req, err := client.SetStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.SetStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.SetStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// SetStorageAccountPreparer prepares the SetStorageAccount request. -func (client BaseClient) SetStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetStorageAccountSender sends the SetStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SetStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// SetStorageAccountResponder handles the response to the SetStorageAccount request. The method always -// closes the http.Response Body. 
-func (client BaseClient) SetStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Sign the SIGN operation is applicable to asymmetric and symmetric keys stored in Azure Key Vault since this -// operation uses the private portion of the key. This operation requires the keys/sign permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the signing operation. -func (client BaseClient) Sign(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeySignParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Sign") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Sign", err.Error()) - } - - req, err := client.SignPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", nil, "Failure preparing request") - return - } - - resp, err := client.SignSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", resp, "Failure sending request") - return - } - - result, err = client.SignResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", resp, "Failure responding to request") - return - } - - return -} - -// SignPreparer prepares the Sign request. -func (client BaseClient) SignPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeySignParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/sign", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SignSender sends the Sign request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SignSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// SignResponder handles the response to the Sign request. 
The method always -// closes the http.Response Body. -func (client BaseClient) SignResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UnwrapKey the UNWRAP operation supports decryption of a symmetric key using the target key encryption key. This -// operation is the reverse of the WRAP operation. The UNWRAP operation applies to asymmetric and symmetric keys stored -// in Azure Key Vault since it uses the private portion of the key. This operation requires the keys/unwrapKey -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the key operation. -func (client BaseClient) UnwrapKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UnwrapKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "UnwrapKey", err.Error()) - } - - req, err := client.UnwrapKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", nil, "Failure preparing request") - return - } - - resp, err := client.UnwrapKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", resp, "Failure sending request") - return - } - - result, err = client.UnwrapKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", resp, "Failure responding to request") - return - } - - return -} - -// UnwrapKeyPreparer prepares the UnwrapKey request. -func (client BaseClient) UnwrapKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/unwrapkey", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UnwrapKeySender sends the UnwrapKey request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) UnwrapKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UnwrapKeyResponder handles the response to the UnwrapKey request. The method always -// closes the http.Response Body. -func (client BaseClient) UnwrapKeyResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateCertificate the UpdateCertificate operation applies the specified update on the given certificate; the only -// elements updated are the certificate's attributes. This operation requires the certificates/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in the given key vault. -// certificateVersion - the version of the certificate. -// parameters - the parameters for certificate update. -func (client BaseClient) UpdateCertificate(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string, parameters CertificateUpdateParameters) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateCertificatePreparer(ctx, vaultBaseURL, certificateName, certificateVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", resp, "Failure sending request") - return - } - - result, err = client.UpdateCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", resp, "Failure responding to request") - return - } - - return -} - -// UpdateCertificatePreparer prepares the UpdateCertificate request. 
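The UnwrapKey implementation deleted above follows the same Preparer/Sender/Responder pattern. A hedged sketch of a caller, again assuming the v7.1 import path and that KeyOperationsParameters.Value is a *string carrying base64url data; the helper name is hypothetical:

package kvexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
)

// unwrapExample asks the vault to unwrap a previously wrapped key, mirroring
// the UnwrapKey signature deleted above.
func unwrapExample(ctx context.Context, client keyvault.BaseClient, vaultURL, keyName, keyVersion, wrapped string) (keyvault.KeyOperationResult, error) {
	// KeyOperationsParameters.Value is assumed to hold the base64url-encoded
	// wrapped key; a real call would also set the wrapping-algorithm field,
	// which this hunk does not show.
	params := keyvault.KeyOperationsParameters{Value: &wrapped}
	return client.UnwrapKey(ctx, vaultURL, keyName, keyVersion, params)
}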
-func (client BaseClient) UpdateCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string, parameters CertificateUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - "certificate-version": autorest.Encode("path", certificateVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/{certificate-version}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificateSender sends the UpdateCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateCertificateResponder handles the response to the UpdateCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateCertificateIssuer the UpdateCertificateIssuer operation performs an update on the specified certificate issuer -// entity. This operation requires the certificates/setissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -// parameter - certificate issuer update parameter. -func (client BaseClient) UpdateCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerUpdateParameters) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName, parameter) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.UpdateCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", resp, "Failure responding to request") - return - } - - return -} - -// UpdateCertificateIssuerPreparer prepares the UpdateCertificateIssuer request. 
-func (client BaseClient) UpdateCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithJSON(parameter), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificateIssuerSender sends the UpdateCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateCertificateIssuerResponder handles the response to the UpdateCertificateIssuer request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateCertificateOperation updates a certificate creation operation that is already in progress. This operation -// requires the certificates/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// certificateOperation - the certificate operation response. -func (client BaseClient) UpdateCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string, certificateOperation CertificateOperationUpdateParameter) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificateOperation") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateCertificateOperationPreparer(ctx, vaultBaseURL, certificateName, certificateOperation) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateCertificateOperationSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", resp, "Failure sending request") - return - } - - result, err = client.UpdateCertificateOperationResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", resp, "Failure responding to request") - return - } - - return -} - -// UpdateCertificateOperationPreparer prepares the UpdateCertificateOperation request. 
-func (client BaseClient) UpdateCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateOperation CertificateOperationUpdateParameter) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), - autorest.WithJSON(certificateOperation), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificateOperationSender sends the UpdateCertificateOperation request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificateOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateCertificateOperationResponder handles the response to the UpdateCertificateOperation request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateCertificatePolicy set specified members in the certificate policy. Leave others as null. This operation -// requires the certificates/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in the given vault. -// certificatePolicy - the policy for the certificate. -func (client BaseClient) UpdateCertificatePolicy(ctx context.Context, vaultBaseURL string, certificateName string, certificatePolicy CertificatePolicy) (result CertificatePolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificatePolicy") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateCertificatePolicyPreparer(ctx, vaultBaseURL, certificateName, certificatePolicy) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateCertificatePolicySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", resp, "Failure sending request") - return - } - - result, err = client.UpdateCertificatePolicyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", resp, "Failure responding to request") - return - } - - return -} - -// UpdateCertificatePolicyPreparer prepares the UpdateCertificatePolicy request. 
-func (client BaseClient) UpdateCertificatePolicyPreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificatePolicy CertificatePolicy) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - certificatePolicy.ID = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/policy", pathParameters), - autorest.WithJSON(certificatePolicy), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificatePolicySender sends the UpdateCertificatePolicy request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificatePolicySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateCertificatePolicyResponder handles the response to the UpdateCertificatePolicy request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificatePolicyResponder(resp *http.Response) (result CertificatePolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateKey in order to perform this operation, the key must already exist in the Key Vault. Note: The cryptographic -// material of a key itself cannot be changed. This operation requires the keys/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of key to update. -// keyVersion - the version of the key to update. -// parameters - the parameters of the key to update. -func (client BaseClient) UpdateKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", resp, "Failure sending request") - return - } - - result, err = client.UpdateKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", resp, "Failure responding to request") - return - } - - return -} - -// UpdateKeyPreparer prepares the UpdateKey request. 
-func (client BaseClient) UpdateKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateKeySender sends the UpdateKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateKeyResponder handles the response to the UpdateKey request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateSasDefinition updates the specified attributes associated with the given SAS definition. This operation -// requires the storage/setsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. -// parameters - the parameters to update a SAS definition. 
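UpdateKey, removed above, issues a PATCH that cannot alter key material and leaves unspecified attributes unchanged. A small illustrative helper under the same assumptions (import path and helper name are hypothetical; no KeyUpdateParameters fields are set because none are shown in this hunk):

package kvexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
)

// touchKey calls UpdateKey with no fields set, which per the comment above
// cannot change the cryptographic material and leaves attributes as they are.
func touchKey(ctx context.Context, client keyvault.BaseClient, vaultURL, keyName, keyVersion string) (keyvault.KeyBundle, error) {
	// KeyUpdateParameters fields (attributes, tags, permitted operations) are
	// not visible in this hunk, so none are populated here.
	return client.UpdateKey(ctx, vaultURL, keyName, keyVersion, keyvault.KeyUpdateParameters{})
}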
-func (client BaseClient) UpdateSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionUpdateParameters) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "UpdateSasDefinition", err.Error()) - } - - req, err := client.UpdateSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.UpdateSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// UpdateSasDefinitionPreparer prepares the UpdateSasDefinition request. -func (client BaseClient) UpdateSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSasDefinitionSender sends the UpdateSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateSasDefinitionResponder handles the response to the UpdateSasDefinition request. The method always -// closes the http.Response Body. 
-func (client BaseClient) UpdateSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateSecret the UPDATE operation changes specified attributes of an existing stored secret. Attributes that are not -// specified in the request are left unchanged. The value of a secret itself cannot be changed. This operation requires -// the secrets/set permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -// secretVersion - the version of the secret. -// parameters - the parameters for update secret operation. -func (client BaseClient) UpdateSecret(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string, parameters SecretUpdateParameters) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateSecretPreparer(ctx, vaultBaseURL, secretName, secretVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", resp, "Failure sending request") - return - } - - result, err = client.UpdateSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", resp, "Failure responding to request") - return - } - - return -} - -// UpdateSecretPreparer prepares the UpdateSecret request. -func (client BaseClient) UpdateSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string, parameters SecretUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - "secret-version": autorest.Encode("path", secretVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/{secret-version}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSecretSender sends the UpdateSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateSecretResponder handles the response to the UpdateSecret request. The method always -// closes the http.Response Body. 
-func (client BaseClient) UpdateSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateStorageAccount updates the specified attributes associated with the given storage account. This operation -// requires the storage/set/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// parameters - the parameters to update a storage account. -func (client BaseClient) UpdateStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountUpdateParameters) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "UpdateStorageAccount", err.Error()) - } - - req, err := client.UpdateStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.UpdateStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// UpdateStorageAccountPreparer prepares the UpdateStorageAccount request. -func (client BaseClient) UpdateStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateStorageAccountSender sends the UpdateStorageAccount request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) UpdateStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateStorageAccountResponder handles the response to the UpdateStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Verify the VERIFY operation is applicable to symmetric keys stored in Azure Key Vault. VERIFY is not strictly -// necessary for asymmetric keys stored in Azure Key Vault since signature verification can be performed using the -// public portion of the key but this operation is supported as a convenience for callers that only have a -// key-reference and not the public portion of the key. This operation requires the keys/verify permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for verify operations. -func (client BaseClient) Verify(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyVerifyParameters) (result KeyVerifyResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Verify") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Digest", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.Signature", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Verify", err.Error()) - } - - req, err := client.VerifyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", nil, "Failure preparing request") - return - } - - resp, err := client.VerifySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", resp, "Failure sending request") - return - } - - result, err = client.VerifyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", resp, "Failure responding to request") - return - } - - return -} - -// VerifyPreparer prepares the Verify request. 
-func (client BaseClient) VerifyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyVerifyParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/verify", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// VerifySender sends the Verify request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) VerifySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// VerifyResponder handles the response to the Verify request. The method always -// closes the http.Response Body. -func (client BaseClient) VerifyResponder(resp *http.Response) (result KeyVerifyResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// WrapKey the WRAP operation supports encryption of a symmetric key using a key encryption key that has previously -// been stored in an Azure Key Vault. The WRAP operation is only strictly necessary for symmetric keys stored in Azure -// Key Vault since protection with an asymmetric key can be performed using the public portion of the key. This -// operation is supported for asymmetric keys as a convenience for callers that have a key-reference but do not have -// access to the public key material. This operation requires the keys/wrapKey permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for wrap operation. 
-func (client BaseClient) WrapKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.WrapKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "WrapKey", err.Error()) - } - - req, err := client.WrapKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", nil, "Failure preparing request") - return - } - - resp, err := client.WrapKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", resp, "Failure sending request") - return - } - - result, err = client.WrapKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", resp, "Failure responding to request") - return - } - - return -} - -// WrapKeyPreparer prepares the WrapKey request. -func (client BaseClient) WrapKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/wrapkey", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// WrapKeySender sends the WrapKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) WrapKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// WrapKeyResponder handles the response to the WrapKey request. The method always -// closes the http.Response Body. 
-func (client BaseClient) WrapKeyResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/dataplane_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/dataplane_meta.json deleted file mode 100644 index 311f23dc3c4..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/dataplane_meta.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", - "readme": "/_/azure-rest-api-specs/specification/keyvault/data-plane/readme.md", - "tag": "package-7.1", - "use": "@microsoft.azure/autorest.go@2.1.183", - "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.183 --tag=package-7.1 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/keyvault/data-plane/readme.md", - "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION" - } -} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/enums.go deleted file mode 100644 index 5c4dbbcec28..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/enums.go +++ /dev/null @@ -1,231 +0,0 @@ -package keyvault - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// ActionType enumerates the values for action type. -type ActionType string - -const ( - // AutoRenew ... - AutoRenew ActionType = "AutoRenew" - // EmailContacts ... - EmailContacts ActionType = "EmailContacts" -) - -// PossibleActionTypeValues returns an array of possible values for the ActionType const type. -func PossibleActionTypeValues() []ActionType { - return []ActionType{AutoRenew, EmailContacts} -} - -// DeletionRecoveryLevel enumerates the values for deletion recovery level. -type DeletionRecoveryLevel string - -const ( - // CustomizedRecoverable Denotes a vault state in which deletion is recoverable without the possibility for - // immediate and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90).This level - // guarantees the recoverability of the deleted entity during the retention interval and while the - // subscription is still available. - CustomizedRecoverable DeletionRecoveryLevel = "CustomizedRecoverable" - // CustomizedRecoverableProtectedSubscription Denotes a vault and subscription state in which deletion is - // recoverable, immediate and permanent deletion (i.e. purge) is not permitted, and in which the - // subscription itself cannot be permanently canceled when 7<= SoftDeleteRetentionInDays < 90. 
This level - // guarantees the recoverability of the deleted entity during the retention interval, and also reflects the - // fact that the subscription itself cannot be cancelled. - CustomizedRecoverableProtectedSubscription DeletionRecoveryLevel = "CustomizedRecoverable+ProtectedSubscription" - // CustomizedRecoverablePurgeable Denotes a vault state in which deletion is recoverable, and which also - // permits immediate and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90). This - // level guarantees the recoverability of the deleted entity during the retention interval, unless a Purge - // operation is requested, or the subscription is cancelled. - CustomizedRecoverablePurgeable DeletionRecoveryLevel = "CustomizedRecoverable+Purgeable" - // Purgeable Denotes a vault state in which deletion is an irreversible operation, without the possibility - // for recovery. This level corresponds to no protection being available against a Delete operation; the - // data is irretrievably lost upon accepting a Delete operation at the entity level or higher (vault, - // resource group, subscription etc.) - Purgeable DeletionRecoveryLevel = "Purgeable" - // Recoverable Denotes a vault state in which deletion is recoverable without the possibility for immediate - // and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted entity - // during the retention interval(90 days) and while the subscription is still available. System wil - // permanently delete it after 90 days, if not recovered - Recoverable DeletionRecoveryLevel = "Recoverable" - // RecoverableProtectedSubscription Denotes a vault and subscription state in which deletion is recoverable - // within retention interval (90 days), immediate and permanent deletion (i.e. purge) is not permitted, and - // in which the subscription itself cannot be permanently canceled. System wil permanently delete it after - // 90 days, if not recovered - RecoverableProtectedSubscription DeletionRecoveryLevel = "Recoverable+ProtectedSubscription" - // RecoverablePurgeable Denotes a vault state in which deletion is recoverable, and which also permits - // immediate and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted - // entity during the retention interval (90 days), unless a Purge operation is requested, or the - // subscription is cancelled. System wil permanently delete it after 90 days, if not recovered - RecoverablePurgeable DeletionRecoveryLevel = "Recoverable+Purgeable" -) - -// PossibleDeletionRecoveryLevelValues returns an array of possible values for the DeletionRecoveryLevel const type. -func PossibleDeletionRecoveryLevelValues() []DeletionRecoveryLevel { - return []DeletionRecoveryLevel{CustomizedRecoverable, CustomizedRecoverableProtectedSubscription, CustomizedRecoverablePurgeable, Purgeable, Recoverable, RecoverableProtectedSubscription, RecoverablePurgeable} -} - -// JSONWebKeyCurveName enumerates the values for json web key curve name. -type JSONWebKeyCurveName string - -const ( - // P256 ... - P256 JSONWebKeyCurveName = "P-256" - // P256K ... - P256K JSONWebKeyCurveName = "P-256K" - // P384 ... - P384 JSONWebKeyCurveName = "P-384" - // P521 ... - P521 JSONWebKeyCurveName = "P-521" -) - -// PossibleJSONWebKeyCurveNameValues returns an array of possible values for the JSONWebKeyCurveName const type. 
-func PossibleJSONWebKeyCurveNameValues() []JSONWebKeyCurveName { - return []JSONWebKeyCurveName{P256, P256K, P384, P521} -} - -// JSONWebKeyEncryptionAlgorithm enumerates the values for json web key encryption algorithm. -type JSONWebKeyEncryptionAlgorithm string - -const ( - // RSA15 ... - RSA15 JSONWebKeyEncryptionAlgorithm = "RSA1_5" - // RSAOAEP ... - RSAOAEP JSONWebKeyEncryptionAlgorithm = "RSA-OAEP" - // RSAOAEP256 ... - RSAOAEP256 JSONWebKeyEncryptionAlgorithm = "RSA-OAEP-256" -) - -// PossibleJSONWebKeyEncryptionAlgorithmValues returns an array of possible values for the JSONWebKeyEncryptionAlgorithm const type. -func PossibleJSONWebKeyEncryptionAlgorithmValues() []JSONWebKeyEncryptionAlgorithm { - return []JSONWebKeyEncryptionAlgorithm{RSA15, RSAOAEP, RSAOAEP256} -} - -// JSONWebKeyOperation enumerates the values for json web key operation. -type JSONWebKeyOperation string - -const ( - // Decrypt ... - Decrypt JSONWebKeyOperation = "decrypt" - // Encrypt ... - Encrypt JSONWebKeyOperation = "encrypt" - // Import ... - Import JSONWebKeyOperation = "import" - // Sign ... - Sign JSONWebKeyOperation = "sign" - // UnwrapKey ... - UnwrapKey JSONWebKeyOperation = "unwrapKey" - // Verify ... - Verify JSONWebKeyOperation = "verify" - // WrapKey ... - WrapKey JSONWebKeyOperation = "wrapKey" -) - -// PossibleJSONWebKeyOperationValues returns an array of possible values for the JSONWebKeyOperation const type. -func PossibleJSONWebKeyOperationValues() []JSONWebKeyOperation { - return []JSONWebKeyOperation{Decrypt, Encrypt, Import, Sign, UnwrapKey, Verify, WrapKey} -} - -// JSONWebKeySignatureAlgorithm enumerates the values for json web key signature algorithm. -type JSONWebKeySignatureAlgorithm string - -const ( - // ES256 ECDSA using P-256 and SHA-256, as described in https://tools.ietf.org/html/rfc7518. - ES256 JSONWebKeySignatureAlgorithm = "ES256" - // ES256K ECDSA using P-256K and SHA-256, as described in https://tools.ietf.org/html/rfc7518 - ES256K JSONWebKeySignatureAlgorithm = "ES256K" - // ES384 ECDSA using P-384 and SHA-384, as described in https://tools.ietf.org/html/rfc7518 - ES384 JSONWebKeySignatureAlgorithm = "ES384" - // ES512 ECDSA using P-521 and SHA-512, as described in https://tools.ietf.org/html/rfc7518 - ES512 JSONWebKeySignatureAlgorithm = "ES512" - // PS256 RSASSA-PSS using SHA-256 and MGF1 with SHA-256, as described in - // https://tools.ietf.org/html/rfc7518 - PS256 JSONWebKeySignatureAlgorithm = "PS256" - // PS384 RSASSA-PSS using SHA-384 and MGF1 with SHA-384, as described in - // https://tools.ietf.org/html/rfc7518 - PS384 JSONWebKeySignatureAlgorithm = "PS384" - // PS512 RSASSA-PSS using SHA-512 and MGF1 with SHA-512, as described in - // https://tools.ietf.org/html/rfc7518 - PS512 JSONWebKeySignatureAlgorithm = "PS512" - // RS256 RSASSA-PKCS1-v1_5 using SHA-256, as described in https://tools.ietf.org/html/rfc7518 - RS256 JSONWebKeySignatureAlgorithm = "RS256" - // RS384 RSASSA-PKCS1-v1_5 using SHA-384, as described in https://tools.ietf.org/html/rfc7518 - RS384 JSONWebKeySignatureAlgorithm = "RS384" - // RS512 RSASSA-PKCS1-v1_5 using SHA-512, as described in https://tools.ietf.org/html/rfc7518 - RS512 JSONWebKeySignatureAlgorithm = "RS512" - // RSNULL Reserved - RSNULL JSONWebKeySignatureAlgorithm = "RSNULL" -) - -// PossibleJSONWebKeySignatureAlgorithmValues returns an array of possible values for the JSONWebKeySignatureAlgorithm const type. 
-func PossibleJSONWebKeySignatureAlgorithmValues() []JSONWebKeySignatureAlgorithm { - return []JSONWebKeySignatureAlgorithm{ES256, ES256K, ES384, ES512, PS256, PS384, PS512, RS256, RS384, RS512, RSNULL} -} - -// JSONWebKeyType enumerates the values for json web key type. -type JSONWebKeyType string - -const ( - // EC ... - EC JSONWebKeyType = "EC" - // ECHSM ... - ECHSM JSONWebKeyType = "EC-HSM" - // Oct ... - Oct JSONWebKeyType = "oct" - // RSA ... - RSA JSONWebKeyType = "RSA" - // RSAHSM ... - RSAHSM JSONWebKeyType = "RSA-HSM" -) - -// PossibleJSONWebKeyTypeValues returns an array of possible values for the JSONWebKeyType const type. -func PossibleJSONWebKeyTypeValues() []JSONWebKeyType { - return []JSONWebKeyType{EC, ECHSM, Oct, RSA, RSAHSM} -} - -// KeyUsageType enumerates the values for key usage type. -type KeyUsageType string - -const ( - // CRLSign ... - CRLSign KeyUsageType = "cRLSign" - // DataEncipherment ... - DataEncipherment KeyUsageType = "dataEncipherment" - // DecipherOnly ... - DecipherOnly KeyUsageType = "decipherOnly" - // DigitalSignature ... - DigitalSignature KeyUsageType = "digitalSignature" - // EncipherOnly ... - EncipherOnly KeyUsageType = "encipherOnly" - // KeyAgreement ... - KeyAgreement KeyUsageType = "keyAgreement" - // KeyCertSign ... - KeyCertSign KeyUsageType = "keyCertSign" - // KeyEncipherment ... - KeyEncipherment KeyUsageType = "keyEncipherment" - // NonRepudiation ... - NonRepudiation KeyUsageType = "nonRepudiation" -) - -// PossibleKeyUsageTypeValues returns an array of possible values for the KeyUsageType const type. -func PossibleKeyUsageTypeValues() []KeyUsageType { - return []KeyUsageType{CRLSign, DataEncipherment, DecipherOnly, DigitalSignature, EncipherOnly, KeyAgreement, KeyCertSign, KeyEncipherment, NonRepudiation} -} - -// SasTokenType enumerates the values for sas token type. -type SasTokenType string - -const ( - // Account ... - Account SasTokenType = "account" - // Service ... - Service SasTokenType = "service" -) - -// PossibleSasTokenTypeValues returns an array of possible values for the SasTokenType const type. -func PossibleSasTokenTypeValues() []SasTokenType { - return []SasTokenType{Account, Service} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/models.go deleted file mode 100644 index 8ebcb0f24cf..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/models.go +++ /dev/null @@ -1,3611 +0,0 @@ -package keyvault - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" - -// Action the action that will be executed. -type Action struct { - // ActionType - The type of the action. 
Possible values include: 'EmailContacts', 'AutoRenew' - ActionType ActionType `json:"action_type,omitempty"` -} - -// AdministratorDetails details of the organization administrator of the certificate issuer. -type AdministratorDetails struct { - // FirstName - First name. - FirstName *string `json:"first_name,omitempty"` - // LastName - Last name. - LastName *string `json:"last_name,omitempty"` - // EmailAddress - Email address. - EmailAddress *string `json:"email,omitempty"` - // Phone - Phone number. - Phone *string `json:"phone,omitempty"` -} - -// Attributes the object attributes managed by the KeyVault service. -type Attributes struct { - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for Attributes. -func (a Attributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if a.Enabled != nil { - objectMap["enabled"] = a.Enabled - } - if a.NotBefore != nil { - objectMap["nbf"] = a.NotBefore - } - if a.Expires != nil { - objectMap["exp"] = a.Expires - } - return json.Marshal(objectMap) -} - -// BackupCertificateResult the backup certificate result, containing the backup blob. -type BackupCertificateResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up certificate. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for BackupCertificateResult. -func (bcr BackupCertificateResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// BackupKeyResult the backup key result, containing the backup blob. -type BackupKeyResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up key. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for BackupKeyResult. -func (bkr BackupKeyResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// BackupSecretResult the backup secret result, containing the backup blob. -type BackupSecretResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up secret. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for BackupSecretResult. -func (bsr BackupSecretResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// BackupStorageResult the backup storage result, containing the backup blob. -type BackupStorageResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up storage account. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for BackupStorageResult. 
-func (bsr BackupStorageResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CertificateAttributes the certificate management attributes. -type CertificateAttributes struct { - // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. - RecoverableDays *int32 `json:"recoverableDays,omitempty"` - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for certificates in the current vault. If it contains 'Purgeable', the certificate can be permanently deleted by a privileged user; otherwise, only the system can purge the certificate, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateAttributes. -func (ca CertificateAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ca.Enabled != nil { - objectMap["enabled"] = ca.Enabled - } - if ca.NotBefore != nil { - objectMap["nbf"] = ca.NotBefore - } - if ca.Expires != nil { - objectMap["exp"] = ca.Expires - } - return json.Marshal(objectMap) -} - -// CertificateBundle a certificate bundle consists of a certificate (X509) plus its attributes. -type CertificateBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // Kid - READ-ONLY; The key id. - Kid *string `json:"kid,omitempty"` - // Sid - READ-ONLY; The secret id. - Sid *string `json:"sid,omitempty"` - // X509Thumbprint - READ-ONLY; Thumbprint of the certificate. (a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` - // Policy - READ-ONLY; The management policy. - Policy *CertificatePolicy `json:"policy,omitempty"` - // Cer - CER contents of x509 certificate. - Cer *[]byte `json:"cer,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The certificate attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateBundle. -func (cb CertificateBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cb.Cer != nil { - objectMap["cer"] = cb.Cer - } - if cb.ContentType != nil { - objectMap["contentType"] = cb.ContentType - } - if cb.Attributes != nil { - objectMap["attributes"] = cb.Attributes - } - if cb.Tags != nil { - objectMap["tags"] = cb.Tags - } - return json.Marshal(objectMap) -} - -// CertificateCreateParameters the certificate create parameters. 
-type CertificateCreateParameters struct { - // CertificatePolicy - The management policy for the certificate. - CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateCreateParameters. -func (ccp CertificateCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ccp.CertificatePolicy != nil { - objectMap["policy"] = ccp.CertificatePolicy - } - if ccp.CertificateAttributes != nil { - objectMap["attributes"] = ccp.CertificateAttributes - } - if ccp.Tags != nil { - objectMap["tags"] = ccp.Tags - } - return json.Marshal(objectMap) -} - -// CertificateImportParameters the certificate import parameters. -type CertificateImportParameters struct { - // Base64EncodedCertificate - A PEM file or a base64-encoded PFX file. PEM files need to contain the private key. - Base64EncodedCertificate *string `json:"value,omitempty"` - // Password - If the private key in base64EncodedCertificate is encrypted, the password used for encryption. - Password *string `json:"pwd,omitempty"` - // CertificatePolicy - The management policy for the certificate. - CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateImportParameters. -func (cip CertificateImportParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cip.Base64EncodedCertificate != nil { - objectMap["value"] = cip.Base64EncodedCertificate - } - if cip.Password != nil { - objectMap["pwd"] = cip.Password - } - if cip.CertificatePolicy != nil { - objectMap["policy"] = cip.CertificatePolicy - } - if cip.CertificateAttributes != nil { - objectMap["attributes"] = cip.CertificateAttributes - } - if cip.Tags != nil { - objectMap["tags"] = cip.Tags - } - return json.Marshal(objectMap) -} - -// CertificateIssuerItem the certificate issuer item containing certificate issuer metadata. -type CertificateIssuerItem struct { - // ID - Certificate Identifier. - ID *string `json:"id,omitempty"` - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` -} - -// CertificateIssuerListResult the certificate issuer list result. -type CertificateIssuerListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of certificate issuers in the key vault along with a link to the next page of certificate issuers. - Value *[]CertificateIssuerItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of certificate issuers. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateIssuerListResult. -func (cilr CertificateIssuerListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CertificateIssuerListResultIterator provides access to a complete listing of CertificateIssuerItem -// values. 
-type CertificateIssuerListResultIterator struct { - i int - page CertificateIssuerListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *CertificateIssuerListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateIssuerListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *CertificateIssuerListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter CertificateIssuerListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter CertificateIssuerListResultIterator) Response() CertificateIssuerListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter CertificateIssuerListResultIterator) Value() CertificateIssuerItem { - if !iter.page.NotDone() { - return CertificateIssuerItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the CertificateIssuerListResultIterator type. -func NewCertificateIssuerListResultIterator(page CertificateIssuerListResultPage) CertificateIssuerListResultIterator { - return CertificateIssuerListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (cilr CertificateIssuerListResult) IsEmpty() bool { - return cilr.Value == nil || len(*cilr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (cilr CertificateIssuerListResult) hasNextLink() bool { - return cilr.NextLink != nil && len(*cilr.NextLink) != 0 -} - -// certificateIssuerListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (cilr CertificateIssuerListResult) certificateIssuerListResultPreparer(ctx context.Context) (*http.Request, error) { - if !cilr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(cilr.NextLink))) -} - -// CertificateIssuerListResultPage contains a page of CertificateIssuerItem values. -type CertificateIssuerListResultPage struct { - fn func(context.Context, CertificateIssuerListResult) (CertificateIssuerListResult, error) - cilr CertificateIssuerListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *CertificateIssuerListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateIssuerListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.cilr) - if err != nil { - return err - } - page.cilr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *CertificateIssuerListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page CertificateIssuerListResultPage) NotDone() bool { - return !page.cilr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page CertificateIssuerListResultPage) Response() CertificateIssuerListResult { - return page.cilr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page CertificateIssuerListResultPage) Values() []CertificateIssuerItem { - if page.cilr.IsEmpty() { - return nil - } - return *page.cilr.Value -} - -// Creates a new instance of the CertificateIssuerListResultPage type. -func NewCertificateIssuerListResultPage(cur CertificateIssuerListResult, getNextPage func(context.Context, CertificateIssuerListResult) (CertificateIssuerListResult, error)) CertificateIssuerListResultPage { - return CertificateIssuerListResultPage{ - fn: getNextPage, - cilr: cur, - } -} - -// CertificateIssuerSetParameters the certificate issuer set parameters. -type CertificateIssuerSetParameters struct { - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` - // Credentials - The credentials to be used for the issuer. - Credentials *IssuerCredentials `json:"credentials,omitempty"` - // OrganizationDetails - Details of the organization as provided to the issuer. - OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` - // Attributes - Attributes of the issuer object. - Attributes *IssuerAttributes `json:"attributes,omitempty"` -} - -// CertificateIssuerUpdateParameters the certificate issuer update parameters. -type CertificateIssuerUpdateParameters struct { - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` - // Credentials - The credentials to be used for the issuer. - Credentials *IssuerCredentials `json:"credentials,omitempty"` - // OrganizationDetails - Details of the organization as provided to the issuer. - OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` - // Attributes - Attributes of the issuer object. - Attributes *IssuerAttributes `json:"attributes,omitempty"` -} - -// CertificateItem the certificate item containing certificate metadata. -type CertificateItem struct { - // ID - Certificate identifier. - ID *string `json:"id,omitempty"` - // Attributes - The certificate management attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // X509Thumbprint - Thumbprint of the certificate. 
(a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateItem. -func (ci CertificateItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ci.ID != nil { - objectMap["id"] = ci.ID - } - if ci.Attributes != nil { - objectMap["attributes"] = ci.Attributes - } - if ci.Tags != nil { - objectMap["tags"] = ci.Tags - } - if ci.X509Thumbprint != nil { - objectMap["x5t"] = ci.X509Thumbprint - } - return json.Marshal(objectMap) -} - -// CertificateListResult the certificate list result. -type CertificateListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of certificates in the key vault along with a link to the next page of certificates. - Value *[]CertificateItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of certificates. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateListResult. -func (clr CertificateListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CertificateListResultIterator provides access to a complete listing of CertificateItem values. -type CertificateListResultIterator struct { - i int - page CertificateListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *CertificateListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *CertificateListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter CertificateListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter CertificateListResultIterator) Response() CertificateListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter CertificateListResultIterator) Value() CertificateItem { - if !iter.page.NotDone() { - return CertificateItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the CertificateListResultIterator type. -func NewCertificateListResultIterator(page CertificateListResultPage) CertificateListResultIterator { - return CertificateListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. 
-func (clr CertificateListResult) IsEmpty() bool { - return clr.Value == nil || len(*clr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (clr CertificateListResult) hasNextLink() bool { - return clr.NextLink != nil && len(*clr.NextLink) != 0 -} - -// certificateListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (clr CertificateListResult) certificateListResultPreparer(ctx context.Context) (*http.Request, error) { - if !clr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(clr.NextLink))) -} - -// CertificateListResultPage contains a page of CertificateItem values. -type CertificateListResultPage struct { - fn func(context.Context, CertificateListResult) (CertificateListResult, error) - clr CertificateListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *CertificateListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.clr) - if err != nil { - return err - } - page.clr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *CertificateListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page CertificateListResultPage) NotDone() bool { - return !page.clr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page CertificateListResultPage) Response() CertificateListResult { - return page.clr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page CertificateListResultPage) Values() []CertificateItem { - if page.clr.IsEmpty() { - return nil - } - return *page.clr.Value -} - -// Creates a new instance of the CertificateListResultPage type. -func NewCertificateListResultPage(cur CertificateListResult, getNextPage func(context.Context, CertificateListResult) (CertificateListResult, error)) CertificateListResultPage { - return CertificateListResultPage{ - fn: getNextPage, - clr: cur, - } -} - -// CertificateMergeParameters the certificate merge parameters -type CertificateMergeParameters struct { - // X509Certificates - The certificate or the certificate chain to merge. - X509Certificates *[][]byte `json:"x5c,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateMergeParameters. 
-func (cmp CertificateMergeParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cmp.X509Certificates != nil { - objectMap["x5c"] = cmp.X509Certificates - } - if cmp.CertificateAttributes != nil { - objectMap["attributes"] = cmp.CertificateAttributes - } - if cmp.Tags != nil { - objectMap["tags"] = cmp.Tags - } - return json.Marshal(objectMap) -} - -// CertificateOperation a certificate operation is returned in case of asynchronous requests. -type CertificateOperation struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // IssuerParameters - Parameters for the issuer of the X509 component of a certificate. - IssuerParameters *IssuerParameters `json:"issuer,omitempty"` - // Csr - The certificate signing request (CSR) that is being used in the certificate operation. - Csr *[]byte `json:"csr,omitempty"` - // CancellationRequested - Indicates if cancellation was requested on the certificate operation. - CancellationRequested *bool `json:"cancellation_requested,omitempty"` - // Status - Status of the certificate operation. - Status *string `json:"status,omitempty"` - // StatusDetails - The status details of the certificate operation. - StatusDetails *string `json:"status_details,omitempty"` - // Error - Error encountered, if any, during the certificate operation. - Error *Error `json:"error,omitempty"` - // Target - Location which contains the result of the certificate operation. - Target *string `json:"target,omitempty"` - // RequestID - Identifier for the certificate operation. - RequestID *string `json:"request_id,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateOperation. -func (co CertificateOperation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if co.IssuerParameters != nil { - objectMap["issuer"] = co.IssuerParameters - } - if co.Csr != nil { - objectMap["csr"] = co.Csr - } - if co.CancellationRequested != nil { - objectMap["cancellation_requested"] = co.CancellationRequested - } - if co.Status != nil { - objectMap["status"] = co.Status - } - if co.StatusDetails != nil { - objectMap["status_details"] = co.StatusDetails - } - if co.Error != nil { - objectMap["error"] = co.Error - } - if co.Target != nil { - objectMap["target"] = co.Target - } - if co.RequestID != nil { - objectMap["request_id"] = co.RequestID - } - return json.Marshal(objectMap) -} - -// CertificateOperationUpdateParameter the certificate operation update parameters. -type CertificateOperationUpdateParameter struct { - // CancellationRequested - Indicates if cancellation was requested on the certificate operation. - CancellationRequested *bool `json:"cancellation_requested,omitempty"` -} - -// CertificatePolicy management policy for a certificate. -type CertificatePolicy struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // KeyProperties - Properties of the key backing a certificate. - KeyProperties *KeyProperties `json:"key_props,omitempty"` - // SecretProperties - Properties of the secret backing a certificate. - SecretProperties *SecretProperties `json:"secret_props,omitempty"` - // X509CertificateProperties - Properties of the X509 component of a certificate. - X509CertificateProperties *X509CertificateProperties `json:"x509_props,omitempty"` - // LifetimeActions - Actions that will be performed by Key Vault over the lifetime of a certificate. 
- LifetimeActions *[]LifetimeAction `json:"lifetime_actions,omitempty"` - // IssuerParameters - Parameters for the issuer of the X509 component of a certificate. - IssuerParameters *IssuerParameters `json:"issuer,omitempty"` - // Attributes - The certificate attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificatePolicy. -func (cp CertificatePolicy) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cp.KeyProperties != nil { - objectMap["key_props"] = cp.KeyProperties - } - if cp.SecretProperties != nil { - objectMap["secret_props"] = cp.SecretProperties - } - if cp.X509CertificateProperties != nil { - objectMap["x509_props"] = cp.X509CertificateProperties - } - if cp.LifetimeActions != nil { - objectMap["lifetime_actions"] = cp.LifetimeActions - } - if cp.IssuerParameters != nil { - objectMap["issuer"] = cp.IssuerParameters - } - if cp.Attributes != nil { - objectMap["attributes"] = cp.Attributes - } - return json.Marshal(objectMap) -} - -// CertificateRestoreParameters the certificate restore parameters. -type CertificateRestoreParameters struct { - // CertificateBundleBackup - The backup blob associated with a certificate bundle. (a URL-encoded base64 string) - CertificateBundleBackup *string `json:"value,omitempty"` -} - -// CertificateUpdateParameters the certificate update parameters. -type CertificateUpdateParameters struct { - // CertificatePolicy - The management policy for the certificate. - CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateUpdateParameters. -func (cup CertificateUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cup.CertificatePolicy != nil { - objectMap["policy"] = cup.CertificatePolicy - } - if cup.CertificateAttributes != nil { - objectMap["attributes"] = cup.CertificateAttributes - } - if cup.Tags != nil { - objectMap["tags"] = cup.Tags - } - return json.Marshal(objectMap) -} - -// Contact the contact information for the vault certificates. -type Contact struct { - // EmailAddress - Email address. - EmailAddress *string `json:"email,omitempty"` - // Name - Name. - Name *string `json:"name,omitempty"` - // Phone - Phone number. - Phone *string `json:"phone,omitempty"` -} - -// Contacts the contacts for the vault certificates. -type Contacts struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; Identifier for the contacts collection. - ID *string `json:"id,omitempty"` - // ContactList - The contact list for the vault certificates. - ContactList *[]Contact `json:"contacts,omitempty"` -} - -// MarshalJSON is the custom marshaler for Contacts. -func (c Contacts) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if c.ContactList != nil { - objectMap["contacts"] = c.ContactList - } - return json.Marshal(objectMap) -} - -// DeletedCertificateBundle a Deleted Certificate consisting of its previous id, attributes and its tags, -// as well as information on when it will be purged. 
-type DeletedCertificateBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted certificate. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the certificate is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the certificate was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // Kid - READ-ONLY; The key id. - Kid *string `json:"kid,omitempty"` - // Sid - READ-ONLY; The secret id. - Sid *string `json:"sid,omitempty"` - // X509Thumbprint - READ-ONLY; Thumbprint of the certificate. (a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` - // Policy - READ-ONLY; The management policy. - Policy *CertificatePolicy `json:"policy,omitempty"` - // Cer - CER contents of x509 certificate. - Cer *[]byte `json:"cer,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The certificate attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedCertificateBundle. -func (dcb DeletedCertificateBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dcb.RecoveryID != nil { - objectMap["recoveryId"] = dcb.RecoveryID - } - if dcb.Cer != nil { - objectMap["cer"] = dcb.Cer - } - if dcb.ContentType != nil { - objectMap["contentType"] = dcb.ContentType - } - if dcb.Attributes != nil { - objectMap["attributes"] = dcb.Attributes - } - if dcb.Tags != nil { - objectMap["tags"] = dcb.Tags - } - return json.Marshal(objectMap) -} - -// DeletedCertificateItem the deleted certificate item containing metadata about the deleted certificate. -type DeletedCertificateItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted certificate. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the certificate is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the certificate was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - Certificate identifier. - ID *string `json:"id,omitempty"` - // Attributes - The certificate management attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // X509Thumbprint - Thumbprint of the certificate. (a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedCertificateItem. 
-func (dci DeletedCertificateItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dci.RecoveryID != nil { - objectMap["recoveryId"] = dci.RecoveryID - } - if dci.ID != nil { - objectMap["id"] = dci.ID - } - if dci.Attributes != nil { - objectMap["attributes"] = dci.Attributes - } - if dci.Tags != nil { - objectMap["tags"] = dci.Tags - } - if dci.X509Thumbprint != nil { - objectMap["x5t"] = dci.X509Thumbprint - } - return json.Marshal(objectMap) -} - -// DeletedCertificateListResult a list of certificates that have been deleted in this vault. -type DeletedCertificateListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of deleted certificates in the vault along with a link to the next page of deleted certificates - Value *[]DeletedCertificateItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted certificates. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedCertificateListResult. -func (dclr DeletedCertificateListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedCertificateListResultIterator provides access to a complete listing of DeletedCertificateItem -// values. -type DeletedCertificateListResultIterator struct { - i int - page DeletedCertificateListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DeletedCertificateListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedCertificateListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedCertificateListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedCertificateListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedCertificateListResultIterator) Response() DeletedCertificateListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedCertificateListResultIterator) Value() DeletedCertificateItem { - if !iter.page.NotDone() { - return DeletedCertificateItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedCertificateListResultIterator type. 
-func NewDeletedCertificateListResultIterator(page DeletedCertificateListResultPage) DeletedCertificateListResultIterator { - return DeletedCertificateListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dclr DeletedCertificateListResult) IsEmpty() bool { - return dclr.Value == nil || len(*dclr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dclr DeletedCertificateListResult) hasNextLink() bool { - return dclr.NextLink != nil && len(*dclr.NextLink) != 0 -} - -// deletedCertificateListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dclr DeletedCertificateListResult) deletedCertificateListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dclr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dclr.NextLink))) -} - -// DeletedCertificateListResultPage contains a page of DeletedCertificateItem values. -type DeletedCertificateListResultPage struct { - fn func(context.Context, DeletedCertificateListResult) (DeletedCertificateListResult, error) - dclr DeletedCertificateListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DeletedCertificateListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedCertificateListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dclr) - if err != nil { - return err - } - page.dclr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedCertificateListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedCertificateListResultPage) NotDone() bool { - return !page.dclr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedCertificateListResultPage) Response() DeletedCertificateListResult { - return page.dclr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedCertificateListResultPage) Values() []DeletedCertificateItem { - if page.dclr.IsEmpty() { - return nil - } - return *page.dclr.Value -} - -// Creates a new instance of the DeletedCertificateListResultPage type. 
-func NewDeletedCertificateListResultPage(cur DeletedCertificateListResult, getNextPage func(context.Context, DeletedCertificateListResult) (DeletedCertificateListResult, error)) DeletedCertificateListResultPage { - return DeletedCertificateListResultPage{ - fn: getNextPage, - dclr: cur, - } -} - -// DeletedKeyBundle a DeletedKeyBundle consisting of a WebKey plus its Attributes and deletion info -type DeletedKeyBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted key. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the key is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the key was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // Key - The Json web key. - Key *JSONWebKey `json:"key,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedKeyBundle. -func (dkb DeletedKeyBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dkb.RecoveryID != nil { - objectMap["recoveryId"] = dkb.RecoveryID - } - if dkb.Key != nil { - objectMap["key"] = dkb.Key - } - if dkb.Attributes != nil { - objectMap["attributes"] = dkb.Attributes - } - if dkb.Tags != nil { - objectMap["tags"] = dkb.Tags - } - return json.Marshal(objectMap) -} - -// DeletedKeyItem the deleted key item containing the deleted key metadata and information about deletion. -type DeletedKeyItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted key. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the key is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the key was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // Kid - Key identifier. - Kid *string `json:"kid,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedKeyItem. -func (dki DeletedKeyItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dki.RecoveryID != nil { - objectMap["recoveryId"] = dki.RecoveryID - } - if dki.Kid != nil { - objectMap["kid"] = dki.Kid - } - if dki.Attributes != nil { - objectMap["attributes"] = dki.Attributes - } - if dki.Tags != nil { - objectMap["tags"] = dki.Tags - } - return json.Marshal(objectMap) -} - -// DeletedKeyListResult a list of keys that have been deleted in this vault. 
-type DeletedKeyListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of deleted keys in the vault along with a link to the next page of deleted keys - Value *[]DeletedKeyItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted keys. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedKeyListResult. -func (dklr DeletedKeyListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedKeyListResultIterator provides access to a complete listing of DeletedKeyItem values. -type DeletedKeyListResultIterator struct { - i int - page DeletedKeyListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DeletedKeyListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedKeyListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedKeyListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedKeyListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedKeyListResultIterator) Response() DeletedKeyListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedKeyListResultIterator) Value() DeletedKeyItem { - if !iter.page.NotDone() { - return DeletedKeyItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedKeyListResultIterator type. -func NewDeletedKeyListResultIterator(page DeletedKeyListResultPage) DeletedKeyListResultIterator { - return DeletedKeyListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dklr DeletedKeyListResult) IsEmpty() bool { - return dklr.Value == nil || len(*dklr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dklr DeletedKeyListResult) hasNextLink() bool { - return dklr.NextLink != nil && len(*dklr.NextLink) != 0 -} - -// deletedKeyListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. 
-func (dklr DeletedKeyListResult) deletedKeyListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dklr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dklr.NextLink))) -} - -// DeletedKeyListResultPage contains a page of DeletedKeyItem values. -type DeletedKeyListResultPage struct { - fn func(context.Context, DeletedKeyListResult) (DeletedKeyListResult, error) - dklr DeletedKeyListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DeletedKeyListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedKeyListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dklr) - if err != nil { - return err - } - page.dklr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedKeyListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedKeyListResultPage) NotDone() bool { - return !page.dklr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedKeyListResultPage) Response() DeletedKeyListResult { - return page.dklr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedKeyListResultPage) Values() []DeletedKeyItem { - if page.dklr.IsEmpty() { - return nil - } - return *page.dklr.Value -} - -// Creates a new instance of the DeletedKeyListResultPage type. -func NewDeletedKeyListResultPage(cur DeletedKeyListResult, getNextPage func(context.Context, DeletedKeyListResult) (DeletedKeyListResult, error)) DeletedKeyListResultPage { - return DeletedKeyListResultPage{ - fn: getNextPage, - dklr: cur, - } -} - -// DeletedSasDefinitionBundle a deleted SAS definition bundle consisting of its previous id, attributes and -// its tags, as well as information on when it will be purged. -type DeletedSasDefinitionBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted SAS definition. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the SAS definition is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the SAS definition was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; The SAS definition id. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; Storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // TemplateURI - READ-ONLY; The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. 
- TemplateURI *string `json:"templateUri,omitempty"` - // SasType - READ-ONLY; The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' - SasType SasTokenType `json:"sasType,omitempty"` - // ValidityPeriod - READ-ONLY; The validity period of SAS tokens created according to the SAS definition. - ValidityPeriod *string `json:"validityPeriod,omitempty"` - // Attributes - READ-ONLY; The SAS definition attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedSasDefinitionBundle. -func (dsdb DeletedSasDefinitionBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsdb.RecoveryID != nil { - objectMap["recoveryId"] = dsdb.RecoveryID - } - return json.Marshal(objectMap) -} - -// DeletedSasDefinitionItem the deleted SAS definition item containing metadata about the deleted SAS -// definition. -type DeletedSasDefinitionItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted SAS definition. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the SAS definition is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the SAS definition was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; The storage SAS identifier. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; The storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // Attributes - READ-ONLY; The SAS definition management attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedSasDefinitionItem. -func (dsdi DeletedSasDefinitionItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsdi.RecoveryID != nil { - objectMap["recoveryId"] = dsdi.RecoveryID - } - return json.Marshal(objectMap) -} - -// DeletedSasDefinitionListResult the deleted SAS definition list result -type DeletedSasDefinitionListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of the deleted SAS definitions in the vault along with a link to the next page of deleted sas definitions - Value *[]DeletedSasDefinitionItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted SAS definitions. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSasDefinitionListResult. -func (dsdlr DeletedSasDefinitionListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedSasDefinitionListResultIterator provides access to a complete listing of DeletedSasDefinitionItem -// values. -type DeletedSasDefinitionListResultIterator struct { - i int - page DeletedSasDefinitionListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *DeletedSasDefinitionListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSasDefinitionListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedSasDefinitionListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedSasDefinitionListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedSasDefinitionListResultIterator) Response() DeletedSasDefinitionListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedSasDefinitionListResultIterator) Value() DeletedSasDefinitionItem { - if !iter.page.NotDone() { - return DeletedSasDefinitionItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedSasDefinitionListResultIterator type. -func NewDeletedSasDefinitionListResultIterator(page DeletedSasDefinitionListResultPage) DeletedSasDefinitionListResultIterator { - return DeletedSasDefinitionListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dsdlr DeletedSasDefinitionListResult) IsEmpty() bool { - return dsdlr.Value == nil || len(*dsdlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dsdlr DeletedSasDefinitionListResult) hasNextLink() bool { - return dsdlr.NextLink != nil && len(*dsdlr.NextLink) != 0 -} - -// deletedSasDefinitionListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dsdlr DeletedSasDefinitionListResult) deletedSasDefinitionListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dsdlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dsdlr.NextLink))) -} - -// DeletedSasDefinitionListResultPage contains a page of DeletedSasDefinitionItem values. -type DeletedSasDefinitionListResultPage struct { - fn func(context.Context, DeletedSasDefinitionListResult) (DeletedSasDefinitionListResult, error) - dsdlr DeletedSasDefinitionListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *DeletedSasDefinitionListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSasDefinitionListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dsdlr) - if err != nil { - return err - } - page.dsdlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedSasDefinitionListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedSasDefinitionListResultPage) NotDone() bool { - return !page.dsdlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedSasDefinitionListResultPage) Response() DeletedSasDefinitionListResult { - return page.dsdlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedSasDefinitionListResultPage) Values() []DeletedSasDefinitionItem { - if page.dsdlr.IsEmpty() { - return nil - } - return *page.dsdlr.Value -} - -// Creates a new instance of the DeletedSasDefinitionListResultPage type. -func NewDeletedSasDefinitionListResultPage(cur DeletedSasDefinitionListResult, getNextPage func(context.Context, DeletedSasDefinitionListResult) (DeletedSasDefinitionListResult, error)) DeletedSasDefinitionListResultPage { - return DeletedSasDefinitionListResultPage{ - fn: getNextPage, - dsdlr: cur, - } -} - -// DeletedSecretBundle a Deleted Secret consisting of its previous id, attributes and its tags, as well as -// information on when it will be purged. -type DeletedSecretBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted secret. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the secret is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the secret was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // Value - The secret value. - Value *string `json:"value,omitempty"` - // ID - The secret id. - ID *string `json:"id,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Kid - READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV certificate. - Kid *string `json:"kid,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSecretBundle. 
-func (dsb DeletedSecretBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsb.RecoveryID != nil { - objectMap["recoveryId"] = dsb.RecoveryID - } - if dsb.Value != nil { - objectMap["value"] = dsb.Value - } - if dsb.ID != nil { - objectMap["id"] = dsb.ID - } - if dsb.ContentType != nil { - objectMap["contentType"] = dsb.ContentType - } - if dsb.Attributes != nil { - objectMap["attributes"] = dsb.Attributes - } - if dsb.Tags != nil { - objectMap["tags"] = dsb.Tags - } - return json.Marshal(objectMap) -} - -// DeletedSecretItem the deleted secret item containing metadata about the deleted secret. -type DeletedSecretItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted secret. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the secret is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the secret was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - Secret identifier. - ID *string `json:"id,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // ContentType - Type of the secret value such as a password. - ContentType *string `json:"contentType,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSecretItem. -func (dsi DeletedSecretItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsi.RecoveryID != nil { - objectMap["recoveryId"] = dsi.RecoveryID - } - if dsi.ID != nil { - objectMap["id"] = dsi.ID - } - if dsi.Attributes != nil { - objectMap["attributes"] = dsi.Attributes - } - if dsi.Tags != nil { - objectMap["tags"] = dsi.Tags - } - if dsi.ContentType != nil { - objectMap["contentType"] = dsi.ContentType - } - return json.Marshal(objectMap) -} - -// DeletedSecretListResult the deleted secret list result -type DeletedSecretListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of the deleted secrets in the vault along with a link to the next page of deleted secrets - Value *[]DeletedSecretItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted secrets. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSecretListResult. -func (dslr DeletedSecretListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedSecretListResultIterator provides access to a complete listing of DeletedSecretItem values. -type DeletedSecretListResultIterator struct { - i int - page DeletedSecretListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *DeletedSecretListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSecretListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedSecretListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedSecretListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedSecretListResultIterator) Response() DeletedSecretListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedSecretListResultIterator) Value() DeletedSecretItem { - if !iter.page.NotDone() { - return DeletedSecretItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedSecretListResultIterator type. -func NewDeletedSecretListResultIterator(page DeletedSecretListResultPage) DeletedSecretListResultIterator { - return DeletedSecretListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dslr DeletedSecretListResult) IsEmpty() bool { - return dslr.Value == nil || len(*dslr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dslr DeletedSecretListResult) hasNextLink() bool { - return dslr.NextLink != nil && len(*dslr.NextLink) != 0 -} - -// deletedSecretListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dslr DeletedSecretListResult) deletedSecretListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dslr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dslr.NextLink))) -} - -// DeletedSecretListResultPage contains a page of DeletedSecretItem values. -type DeletedSecretListResultPage struct { - fn func(context.Context, DeletedSecretListResult) (DeletedSecretListResult, error) - dslr DeletedSecretListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *DeletedSecretListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSecretListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dslr) - if err != nil { - return err - } - page.dslr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedSecretListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedSecretListResultPage) NotDone() bool { - return !page.dslr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedSecretListResultPage) Response() DeletedSecretListResult { - return page.dslr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedSecretListResultPage) Values() []DeletedSecretItem { - if page.dslr.IsEmpty() { - return nil - } - return *page.dslr.Value -} - -// Creates a new instance of the DeletedSecretListResultPage type. -func NewDeletedSecretListResultPage(cur DeletedSecretListResult, getNextPage func(context.Context, DeletedSecretListResult) (DeletedSecretListResult, error)) DeletedSecretListResultPage { - return DeletedSecretListResultPage{ - fn: getNextPage, - dslr: cur, - } -} - -// DeletedStorageAccountItem the deleted storage account item containing metadata about the deleted storage -// account. -type DeletedStorageAccountItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted storage account. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the storage account is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the storage account was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; Storage identifier. - ID *string `json:"id,omitempty"` - // ResourceID - READ-ONLY; Storage account resource Id. - ResourceID *string `json:"resourceId,omitempty"` - // Attributes - READ-ONLY; The storage account management attributes. - Attributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedStorageAccountItem. -func (dsai DeletedStorageAccountItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsai.RecoveryID != nil { - objectMap["recoveryId"] = dsai.RecoveryID - } - return json.Marshal(objectMap) -} - -// DeletedStorageBundle a deleted storage account bundle consisting of its previous id, attributes and its -// tags, as well as information on when it will be purged. -type DeletedStorageBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted storage account. 
- RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the storage account is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the storage account was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; The storage account id. - ID *string `json:"id,omitempty"` - // ResourceID - READ-ONLY; The storage account resource id. - ResourceID *string `json:"resourceId,omitempty"` - // ActiveKeyName - READ-ONLY; The current active storage account key name. - ActiveKeyName *string `json:"activeKeyName,omitempty"` - // AutoRegenerateKey - READ-ONLY; whether keyvault should manage the storage account for the user. - AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` - // RegenerationPeriod - READ-ONLY; The key regeneration time duration specified in ISO-8601 format. - RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` - // Attributes - READ-ONLY; The storage account attributes. - Attributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedStorageBundle. -func (dsb DeletedStorageBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsb.RecoveryID != nil { - objectMap["recoveryId"] = dsb.RecoveryID - } - return json.Marshal(objectMap) -} - -// DeletedStorageListResult the deleted storage account list result -type DeletedStorageListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of the deleted storage accounts in the vault along with a link to the next page of deleted storage accounts - Value *[]DeletedStorageAccountItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted storage accounts. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedStorageListResult. -func (dslr DeletedStorageListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedStorageListResultIterator provides access to a complete listing of DeletedStorageAccountItem -// values. -type DeletedStorageListResultIterator struct { - i int - page DeletedStorageListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DeletedStorageListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedStorageListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. 
-func (iter *DeletedStorageListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedStorageListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedStorageListResultIterator) Response() DeletedStorageListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedStorageListResultIterator) Value() DeletedStorageAccountItem { - if !iter.page.NotDone() { - return DeletedStorageAccountItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedStorageListResultIterator type. -func NewDeletedStorageListResultIterator(page DeletedStorageListResultPage) DeletedStorageListResultIterator { - return DeletedStorageListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dslr DeletedStorageListResult) IsEmpty() bool { - return dslr.Value == nil || len(*dslr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dslr DeletedStorageListResult) hasNextLink() bool { - return dslr.NextLink != nil && len(*dslr.NextLink) != 0 -} - -// deletedStorageListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dslr DeletedStorageListResult) deletedStorageListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dslr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dslr.NextLink))) -} - -// DeletedStorageListResultPage contains a page of DeletedStorageAccountItem values. -type DeletedStorageListResultPage struct { - fn func(context.Context, DeletedStorageListResult) (DeletedStorageListResult, error) - dslr DeletedStorageListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DeletedStorageListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedStorageListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dslr) - if err != nil { - return err - } - page.dslr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedStorageListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedStorageListResultPage) NotDone() bool { - return !page.dslr.IsEmpty() -} - -// Response returns the raw server response from the last page request. 
-func (page DeletedStorageListResultPage) Response() DeletedStorageListResult { - return page.dslr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedStorageListResultPage) Values() []DeletedStorageAccountItem { - if page.dslr.IsEmpty() { - return nil - } - return *page.dslr.Value -} - -// Creates a new instance of the DeletedStorageListResultPage type. -func NewDeletedStorageListResultPage(cur DeletedStorageListResult, getNextPage func(context.Context, DeletedStorageListResult) (DeletedStorageListResult, error)) DeletedStorageListResultPage { - return DeletedStorageListResultPage{ - fn: getNextPage, - dslr: cur, - } -} - -// Error the key vault server error. -type Error struct { - // Code - READ-ONLY; The error code. - Code *string `json:"code,omitempty"` - // Message - READ-ONLY; The error message. - Message *string `json:"message,omitempty"` - // InnerError - READ-ONLY - InnerError *Error `json:"innererror,omitempty"` -} - -// MarshalJSON is the custom marshaler for Error. -func (e Error) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ErrorType the key vault error exception. -type ErrorType struct { - // Error - READ-ONLY - Error *Error `json:"error,omitempty"` -} - -// MarshalJSON is the custom marshaler for ErrorType. -func (et ErrorType) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// IssuerAttributes the attributes of an issuer managed by the Key Vault service. -type IssuerAttributes struct { - // Enabled - Determines whether the issuer is enabled. - Enabled *bool `json:"enabled,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for IssuerAttributes. -func (ia IssuerAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ia.Enabled != nil { - objectMap["enabled"] = ia.Enabled - } - return json.Marshal(objectMap) -} - -// IssuerBundle the issuer for Key Vault certificate. -type IssuerBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; Identifier for the issuer object. - ID *string `json:"id,omitempty"` - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` - // Credentials - The credentials to be used for the issuer. - Credentials *IssuerCredentials `json:"credentials,omitempty"` - // OrganizationDetails - Details of the organization as provided to the issuer. - OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` - // Attributes - Attributes of the issuer object. - Attributes *IssuerAttributes `json:"attributes,omitempty"` -} - -// MarshalJSON is the custom marshaler for IssuerBundle. -func (ib IssuerBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ib.Provider != nil { - objectMap["provider"] = ib.Provider - } - if ib.Credentials != nil { - objectMap["credentials"] = ib.Credentials - } - if ib.OrganizationDetails != nil { - objectMap["org_details"] = ib.OrganizationDetails - } - if ib.Attributes != nil { - objectMap["attributes"] = ib.Attributes - } - return json.Marshal(objectMap) -} - -// IssuerCredentials the credentials to be used for the certificate issuer. 
-type IssuerCredentials struct { - // AccountID - The user name/account name/account id. - AccountID *string `json:"account_id,omitempty"` - // Password - The password/secret/account key. - Password *string `json:"pwd,omitempty"` -} - -// IssuerParameters parameters for the issuer of the X509 component of a certificate. -type IssuerParameters struct { - // Name - Name of the referenced issuer object or reserved names; for example, 'Self' or 'Unknown'. - Name *string `json:"name,omitempty"` - // CertificateType - Certificate type as supported by the provider (optional); for example 'OV-SSL', 'EV-SSL' - CertificateType *string `json:"cty,omitempty"` - // CertificateTransparency - Indicates if the certificates generated under this policy should be published to certificate transparency logs. - CertificateTransparency *bool `json:"cert_transparency,omitempty"` -} - -// JSONWebKey as of http://tools.ietf.org/html/draft-ietf-jose-json-web-key-18 -type JSONWebKey struct { - // Kid - Key identifier. - Kid *string `json:"kid,omitempty"` - // Kty - JsonWebKey Key Type (kty), as defined in https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' - Kty JSONWebKeyType `json:"kty,omitempty"` - KeyOps *[]string `json:"key_ops,omitempty"` - // N - RSA modulus. (a URL-encoded base64 string) - N *string `json:"n,omitempty"` - // E - RSA public exponent. (a URL-encoded base64 string) - E *string `json:"e,omitempty"` - // D - RSA private exponent, or the D component of an EC private key. (a URL-encoded base64 string) - D *string `json:"d,omitempty"` - // DP - RSA private key parameter. (a URL-encoded base64 string) - DP *string `json:"dp,omitempty"` - // DQ - RSA private key parameter. (a URL-encoded base64 string) - DQ *string `json:"dq,omitempty"` - // QI - RSA private key parameter. (a URL-encoded base64 string) - QI *string `json:"qi,omitempty"` - // P - RSA secret prime. (a URL-encoded base64 string) - P *string `json:"p,omitempty"` - // Q - RSA secret prime, with p < q. (a URL-encoded base64 string) - Q *string `json:"q,omitempty"` - // K - Symmetric key. (a URL-encoded base64 string) - K *string `json:"k,omitempty"` - // T - HSM Token, used with 'Bring Your Own Key'. (a URL-encoded base64 string) - T *string `json:"key_hsm,omitempty"` - // Crv - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K' - Crv JSONWebKeyCurveName `json:"crv,omitempty"` - // X - X component of an EC public key. (a URL-encoded base64 string) - X *string `json:"x,omitempty"` - // Y - Y component of an EC public key. (a URL-encoded base64 string) - Y *string `json:"y,omitempty"` -} - -// KeyAttributes the attributes of a key managed by the key vault service. -type KeyAttributes struct { - // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. - RecoverableDays *int32 `json:"recoverableDays,omitempty"` - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for keys in the current vault. If it contains 'Purgeable' the key can be permanently deleted by a privileged user; otherwise, only the system can purge the key, at the end of the retention interval. 
Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyAttributes. -func (ka KeyAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ka.Enabled != nil { - objectMap["enabled"] = ka.Enabled - } - if ka.NotBefore != nil { - objectMap["nbf"] = ka.NotBefore - } - if ka.Expires != nil { - objectMap["exp"] = ka.Expires - } - return json.Marshal(objectMap) -} - -// KeyBundle a KeyBundle consisting of a WebKey plus its attributes. -type KeyBundle struct { - autorest.Response `json:"-"` - // Key - The Json web key. - Key *JSONWebKey `json:"key,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyBundle. -func (kb KeyBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kb.Key != nil { - objectMap["key"] = kb.Key - } - if kb.Attributes != nil { - objectMap["attributes"] = kb.Attributes - } - if kb.Tags != nil { - objectMap["tags"] = kb.Tags - } - return json.Marshal(objectMap) -} - -// KeyCreateParameters the key create parameters. -type KeyCreateParameters struct { - // Kty - The type of key to create. For valid values, see JsonWebKeyType. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' - Kty JSONWebKeyType `json:"kty,omitempty"` - // KeySize - The key size in bits. For example: 2048, 3072, or 4096 for RSA. - KeySize *int32 `json:"key_size,omitempty"` - KeyOps *[]JSONWebKeyOperation `json:"key_ops,omitempty"` - KeyAttributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Curve - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K' - Curve JSONWebKeyCurveName `json:"crv,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyCreateParameters. 
-func (kcp KeyCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kcp.Kty != "" { - objectMap["kty"] = kcp.Kty - } - if kcp.KeySize != nil { - objectMap["key_size"] = kcp.KeySize - } - if kcp.KeyOps != nil { - objectMap["key_ops"] = kcp.KeyOps - } - if kcp.KeyAttributes != nil { - objectMap["attributes"] = kcp.KeyAttributes - } - if kcp.Tags != nil { - objectMap["tags"] = kcp.Tags - } - if kcp.Curve != "" { - objectMap["crv"] = kcp.Curve - } - return json.Marshal(objectMap) -} - -// KeyImportParameters the key import parameters. -type KeyImportParameters struct { - // Hsm - Whether to import as a hardware key (HSM) or software key. - Hsm *bool `json:"Hsm,omitempty"` - // Key - The Json web key - Key *JSONWebKey `json:"key,omitempty"` - // KeyAttributes - The key management attributes. - KeyAttributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for KeyImportParameters. -func (kip KeyImportParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kip.Hsm != nil { - objectMap["Hsm"] = kip.Hsm - } - if kip.Key != nil { - objectMap["key"] = kip.Key - } - if kip.KeyAttributes != nil { - objectMap["attributes"] = kip.KeyAttributes - } - if kip.Tags != nil { - objectMap["tags"] = kip.Tags - } - return json.Marshal(objectMap) -} - -// KeyItem the key item containing key metadata. -type KeyItem struct { - // Kid - Key identifier. - Kid *string `json:"kid,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyItem. -func (ki KeyItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ki.Kid != nil { - objectMap["kid"] = ki.Kid - } - if ki.Attributes != nil { - objectMap["attributes"] = ki.Attributes - } - if ki.Tags != nil { - objectMap["tags"] = ki.Tags - } - return json.Marshal(objectMap) -} - -// KeyListResult the key list result. -type KeyListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of keys in the key vault along with a link to the next page of keys. - Value *[]KeyItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of keys. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyListResult. -func (klr KeyListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// KeyListResultIterator provides access to a complete listing of KeyItem values. -type KeyListResultIterator struct { - i int - page KeyListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *KeyListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/KeyListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *KeyListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter KeyListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter KeyListResultIterator) Response() KeyListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter KeyListResultIterator) Value() KeyItem { - if !iter.page.NotDone() { - return KeyItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the KeyListResultIterator type. -func NewKeyListResultIterator(page KeyListResultPage) KeyListResultIterator { - return KeyListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (klr KeyListResult) IsEmpty() bool { - return klr.Value == nil || len(*klr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (klr KeyListResult) hasNextLink() bool { - return klr.NextLink != nil && len(*klr.NextLink) != 0 -} - -// keyListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (klr KeyListResult) keyListResultPreparer(ctx context.Context) (*http.Request, error) { - if !klr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(klr.NextLink))) -} - -// KeyListResultPage contains a page of KeyItem values. -type KeyListResultPage struct { - fn func(context.Context, KeyListResult) (KeyListResult, error) - klr KeyListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *KeyListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/KeyListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.klr) - if err != nil { - return err - } - page.klr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. 
-func (page *KeyListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page KeyListResultPage) NotDone() bool { - return !page.klr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page KeyListResultPage) Response() KeyListResult { - return page.klr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page KeyListResultPage) Values() []KeyItem { - if page.klr.IsEmpty() { - return nil - } - return *page.klr.Value -} - -// Creates a new instance of the KeyListResultPage type. -func NewKeyListResultPage(cur KeyListResult, getNextPage func(context.Context, KeyListResult) (KeyListResult, error)) KeyListResultPage { - return KeyListResultPage{ - fn: getNextPage, - klr: cur, - } -} - -// KeyOperationResult the key operation result. -type KeyOperationResult struct { - autorest.Response `json:"-"` - // Kid - READ-ONLY; Key identifier - Kid *string `json:"kid,omitempty"` - // Result - READ-ONLY; a URL-encoded base64 string - Result *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyOperationResult. -func (kor KeyOperationResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// KeyOperationsParameters the key operations parameters. -type KeyOperationsParameters struct { - // Algorithm - algorithm identifier. Possible values include: 'RSAOAEP', 'RSAOAEP256', 'RSA15' - Algorithm JSONWebKeyEncryptionAlgorithm `json:"alg,omitempty"` - // Value - a URL-encoded base64 string - Value *string `json:"value,omitempty"` -} - -// KeyProperties properties of the key pair backing a certificate. -type KeyProperties struct { - // Exportable - Indicates if the private key can be exported. - Exportable *bool `json:"exportable,omitempty"` - // KeyType - The type of key pair to be used for the certificate. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' - KeyType JSONWebKeyType `json:"kty,omitempty"` - // KeySize - The key size in bits. For example: 2048, 3072, or 4096 for RSA. - KeySize *int32 `json:"key_size,omitempty"` - // ReuseKey - Indicates if the same key pair will be used on certificate renewal. - ReuseKey *bool `json:"reuse_key,omitempty"` - // Curve - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K' - Curve JSONWebKeyCurveName `json:"crv,omitempty"` -} - -// KeyRestoreParameters the key restore parameters. -type KeyRestoreParameters struct { - // KeyBundleBackup - The backup blob associated with a key bundle. (a URL-encoded base64 string) - KeyBundleBackup *string `json:"value,omitempty"` -} - -// KeySignParameters the key operations parameters. -type KeySignParameters struct { - // Algorithm - The signing/verification algorithm identifier. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ES256K' - Algorithm JSONWebKeySignatureAlgorithm `json:"alg,omitempty"` - // Value - a URL-encoded base64 string - Value *string `json:"value,omitempty"` -} - -// KeyUpdateParameters the key update parameters. -type KeyUpdateParameters struct { - // KeyOps - Json web key operations. For more information on possible key operations, see JsonWebKeyOperation. 
- KeyOps *[]JSONWebKeyOperation `json:"key_ops,omitempty"` - KeyAttributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for KeyUpdateParameters. -func (kup KeyUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kup.KeyOps != nil { - objectMap["key_ops"] = kup.KeyOps - } - if kup.KeyAttributes != nil { - objectMap["attributes"] = kup.KeyAttributes - } - if kup.Tags != nil { - objectMap["tags"] = kup.Tags - } - return json.Marshal(objectMap) -} - -// KeyVerifyParameters the key verify parameters. -type KeyVerifyParameters struct { - // Algorithm - The signing/verification algorithm. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ES256K' - Algorithm JSONWebKeySignatureAlgorithm `json:"alg,omitempty"` - // Digest - The digest used for signing. (a URL-encoded base64 string) - Digest *string `json:"digest,omitempty"` - // Signature - The signature to be verified. (a URL-encoded base64 string) - Signature *string `json:"value,omitempty"` -} - -// KeyVerifyResult the key verify result. -type KeyVerifyResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; True if the signature is verified, otherwise false. - Value *bool `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyVerifyResult. -func (kvr KeyVerifyResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// LifetimeAction action and its trigger that will be performed by Key Vault over the lifetime of a -// certificate. -type LifetimeAction struct { - // Trigger - The condition that will execute the action. - Trigger *Trigger `json:"trigger,omitempty"` - // Action - The action that will be executed. - Action *Action `json:"action,omitempty"` -} - -// OrganizationDetails details of the organization of the certificate issuer. -type OrganizationDetails struct { - // ID - Id of the organization. - ID *string `json:"id,omitempty"` - // AdminDetails - Details of the organization administrator. - AdminDetails *[]AdministratorDetails `json:"admin_details,omitempty"` -} - -// PendingCertificateSigningRequestResult the pending certificate signing request result. -type PendingCertificateSigningRequestResult struct { - // Value - READ-ONLY; The pending certificate signing request as Base64 encoded string. - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for PendingCertificateSigningRequestResult. -func (pcsrr PendingCertificateSigningRequestResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasDefinitionAttributes the SAS definition management attributes. -type SasDefinitionAttributes struct { - // Enabled - the enabled state of the object. - Enabled *bool `json:"enabled,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` - // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. 
- RecoverableDays *int32 `json:"recoverableDays,omitempty"` - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for SAS definitions in the current vault. If it contains 'Purgeable' the SAS definition can be permanently deleted by a privileged user; otherwise, only the system can purge the SAS definition, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionAttributes. -func (sda SasDefinitionAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sda.Enabled != nil { - objectMap["enabled"] = sda.Enabled - } - return json.Marshal(objectMap) -} - -// SasDefinitionBundle a SAS definition bundle consists of key vault SAS definition details plus its -// attributes. -type SasDefinitionBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The SAS definition id. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; Storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // TemplateURI - READ-ONLY; The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. - TemplateURI *string `json:"templateUri,omitempty"` - // SasType - READ-ONLY; The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' - SasType SasTokenType `json:"sasType,omitempty"` - // ValidityPeriod - READ-ONLY; The validity period of SAS tokens created according to the SAS definition. - ValidityPeriod *string `json:"validityPeriod,omitempty"` - // Attributes - READ-ONLY; The SAS definition attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionBundle. -func (sdb SasDefinitionBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasDefinitionCreateParameters the SAS definition create parameters. -type SasDefinitionCreateParameters struct { - // TemplateURI - The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. - TemplateURI *string `json:"templateUri,omitempty"` - // SasType - The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' - SasType SasTokenType `json:"sasType,omitempty"` - // ValidityPeriod - The validity period of SAS tokens created according to the SAS definition. - ValidityPeriod *string `json:"validityPeriod,omitempty"` - // SasDefinitionAttributes - The attributes of the SAS definition. - SasDefinitionAttributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionCreateParameters. 
-func (sdcp SasDefinitionCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sdcp.TemplateURI != nil { - objectMap["templateUri"] = sdcp.TemplateURI - } - if sdcp.SasType != "" { - objectMap["sasType"] = sdcp.SasType - } - if sdcp.ValidityPeriod != nil { - objectMap["validityPeriod"] = sdcp.ValidityPeriod - } - if sdcp.SasDefinitionAttributes != nil { - objectMap["attributes"] = sdcp.SasDefinitionAttributes - } - if sdcp.Tags != nil { - objectMap["tags"] = sdcp.Tags - } - return json.Marshal(objectMap) -} - -// SasDefinitionItem the SAS definition item containing storage SAS definition metadata. -type SasDefinitionItem struct { - // ID - READ-ONLY; The storage SAS identifier. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; The storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // Attributes - READ-ONLY; The SAS definition management attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionItem. -func (sdi SasDefinitionItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasDefinitionListResult the storage account SAS definition list result. -type SasDefinitionListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of SAS definitions along with a link to the next page of SAS definitions. - Value *[]SasDefinitionItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of SAS definitions. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionListResult. -func (sdlr SasDefinitionListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasDefinitionListResultIterator provides access to a complete listing of SasDefinitionItem values. -type SasDefinitionListResultIterator struct { - i int - page SasDefinitionListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *SasDefinitionListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SasDefinitionListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *SasDefinitionListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter SasDefinitionListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. 
-func (iter SasDefinitionListResultIterator) Response() SasDefinitionListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter SasDefinitionListResultIterator) Value() SasDefinitionItem { - if !iter.page.NotDone() { - return SasDefinitionItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the SasDefinitionListResultIterator type. -func NewSasDefinitionListResultIterator(page SasDefinitionListResultPage) SasDefinitionListResultIterator { - return SasDefinitionListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (sdlr SasDefinitionListResult) IsEmpty() bool { - return sdlr.Value == nil || len(*sdlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (sdlr SasDefinitionListResult) hasNextLink() bool { - return sdlr.NextLink != nil && len(*sdlr.NextLink) != 0 -} - -// sasDefinitionListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (sdlr SasDefinitionListResult) sasDefinitionListResultPreparer(ctx context.Context) (*http.Request, error) { - if !sdlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(sdlr.NextLink))) -} - -// SasDefinitionListResultPage contains a page of SasDefinitionItem values. -type SasDefinitionListResultPage struct { - fn func(context.Context, SasDefinitionListResult) (SasDefinitionListResult, error) - sdlr SasDefinitionListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *SasDefinitionListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SasDefinitionListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.sdlr) - if err != nil { - return err - } - page.sdlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *SasDefinitionListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page SasDefinitionListResultPage) NotDone() bool { - return !page.sdlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page SasDefinitionListResultPage) Response() SasDefinitionListResult { - return page.sdlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page SasDefinitionListResultPage) Values() []SasDefinitionItem { - if page.sdlr.IsEmpty() { - return nil - } - return *page.sdlr.Value -} - -// Creates a new instance of the SasDefinitionListResultPage type. 
-func NewSasDefinitionListResultPage(cur SasDefinitionListResult, getNextPage func(context.Context, SasDefinitionListResult) (SasDefinitionListResult, error)) SasDefinitionListResultPage { - return SasDefinitionListResultPage{ - fn: getNextPage, - sdlr: cur, - } -} - -// SasDefinitionUpdateParameters the SAS definition update parameters. -type SasDefinitionUpdateParameters struct { - // TemplateURI - The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. - TemplateURI *string `json:"templateUri,omitempty"` - // SasType - The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' - SasType SasTokenType `json:"sasType,omitempty"` - // ValidityPeriod - The validity period of SAS tokens created according to the SAS definition. - ValidityPeriod *string `json:"validityPeriod,omitempty"` - // SasDefinitionAttributes - The attributes of the SAS definition. - SasDefinitionAttributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionUpdateParameters. -func (sdup SasDefinitionUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sdup.TemplateURI != nil { - objectMap["templateUri"] = sdup.TemplateURI - } - if sdup.SasType != "" { - objectMap["sasType"] = sdup.SasType - } - if sdup.ValidityPeriod != nil { - objectMap["validityPeriod"] = sdup.ValidityPeriod - } - if sdup.SasDefinitionAttributes != nil { - objectMap["attributes"] = sdup.SasDefinitionAttributes - } - if sdup.Tags != nil { - objectMap["tags"] = sdup.Tags - } - return json.Marshal(objectMap) -} - -// SecretAttributes the secret management attributes. -type SecretAttributes struct { - // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. - RecoverableDays *int32 `json:"recoverableDays,omitempty"` - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for secrets in the current vault. If it contains 'Purgeable', the secret can be permanently deleted by a privileged user; otherwise, only the system can purge the secret, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretAttributes. 
-func (sa SecretAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sa.Enabled != nil { - objectMap["enabled"] = sa.Enabled - } - if sa.NotBefore != nil { - objectMap["nbf"] = sa.NotBefore - } - if sa.Expires != nil { - objectMap["exp"] = sa.Expires - } - return json.Marshal(objectMap) -} - -// SecretBundle a secret consisting of a value, id and its attributes. -type SecretBundle struct { - autorest.Response `json:"-"` - // Value - The secret value. - Value *string `json:"value,omitempty"` - // ID - The secret id. - ID *string `json:"id,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Kid - READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV certificate. - Kid *string `json:"kid,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretBundle. -func (sb SecretBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sb.Value != nil { - objectMap["value"] = sb.Value - } - if sb.ID != nil { - objectMap["id"] = sb.ID - } - if sb.ContentType != nil { - objectMap["contentType"] = sb.ContentType - } - if sb.Attributes != nil { - objectMap["attributes"] = sb.Attributes - } - if sb.Tags != nil { - objectMap["tags"] = sb.Tags - } - return json.Marshal(objectMap) -} - -// SecretItem the secret item containing secret metadata. -type SecretItem struct { - // ID - Secret identifier. - ID *string `json:"id,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // ContentType - Type of the secret value such as a password. - ContentType *string `json:"contentType,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretItem. -func (si SecretItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if si.ID != nil { - objectMap["id"] = si.ID - } - if si.Attributes != nil { - objectMap["attributes"] = si.Attributes - } - if si.Tags != nil { - objectMap["tags"] = si.Tags - } - if si.ContentType != nil { - objectMap["contentType"] = si.ContentType - } - return json.Marshal(objectMap) -} - -// SecretListResult the secret list result. -type SecretListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of secrets in the key vault along with a link to the next page of secrets. - Value *[]SecretItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of secrets. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretListResult. 
-func (slr SecretListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SecretListResultIterator provides access to a complete listing of SecretItem values. -type SecretListResultIterator struct { - i int - page SecretListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *SecretListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SecretListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *SecretListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter SecretListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter SecretListResultIterator) Response() SecretListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter SecretListResultIterator) Value() SecretItem { - if !iter.page.NotDone() { - return SecretItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the SecretListResultIterator type. -func NewSecretListResultIterator(page SecretListResultPage) SecretListResultIterator { - return SecretListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (slr SecretListResult) IsEmpty() bool { - return slr.Value == nil || len(*slr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (slr SecretListResult) hasNextLink() bool { - return slr.NextLink != nil && len(*slr.NextLink) != 0 -} - -// secretListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (slr SecretListResult) secretListResultPreparer(ctx context.Context) (*http.Request, error) { - if !slr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(slr.NextLink))) -} - -// SecretListResultPage contains a page of SecretItem values. -type SecretListResultPage struct { - fn func(context.Context, SecretListResult) (SecretListResult, error) - slr SecretListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *SecretListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SecretListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.slr) - if err != nil { - return err - } - page.slr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *SecretListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page SecretListResultPage) NotDone() bool { - return !page.slr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page SecretListResultPage) Response() SecretListResult { - return page.slr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page SecretListResultPage) Values() []SecretItem { - if page.slr.IsEmpty() { - return nil - } - return *page.slr.Value -} - -// Creates a new instance of the SecretListResultPage type. -func NewSecretListResultPage(cur SecretListResult, getNextPage func(context.Context, SecretListResult) (SecretListResult, error)) SecretListResultPage { - return SecretListResultPage{ - fn: getNextPage, - slr: cur, - } -} - -// SecretProperties properties of the key backing a certificate. -type SecretProperties struct { - // ContentType - The media type (MIME type). - ContentType *string `json:"contentType,omitempty"` -} - -// SecretRestoreParameters the secret restore parameters. -type SecretRestoreParameters struct { - // SecretBundleBackup - The backup blob associated with a secret bundle. (a URL-encoded base64 string) - SecretBundleBackup *string `json:"value,omitempty"` -} - -// SecretSetParameters the secret set parameters. -type SecretSetParameters struct { - // Value - The value of the secret. - Value *string `json:"value,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // ContentType - Type of the secret value such as a password. - ContentType *string `json:"contentType,omitempty"` - // SecretAttributes - The secret management attributes. - SecretAttributes *SecretAttributes `json:"attributes,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretSetParameters. -func (ssp SecretSetParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ssp.Value != nil { - objectMap["value"] = ssp.Value - } - if ssp.Tags != nil { - objectMap["tags"] = ssp.Tags - } - if ssp.ContentType != nil { - objectMap["contentType"] = ssp.ContentType - } - if ssp.SecretAttributes != nil { - objectMap["attributes"] = ssp.SecretAttributes - } - return json.Marshal(objectMap) -} - -// SecretUpdateParameters the secret update parameters. -type SecretUpdateParameters struct { - // ContentType - Type of the secret value such as a password. - ContentType *string `json:"contentType,omitempty"` - // SecretAttributes - The secret management attributes. 
- SecretAttributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SecretUpdateParameters. -func (sup SecretUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sup.ContentType != nil { - objectMap["contentType"] = sup.ContentType - } - if sup.SecretAttributes != nil { - objectMap["attributes"] = sup.SecretAttributes - } - if sup.Tags != nil { - objectMap["tags"] = sup.Tags - } - return json.Marshal(objectMap) -} - -// StorageAccountAttributes the storage account management attributes. -type StorageAccountAttributes struct { - // Enabled - the enabled state of the object. - Enabled *bool `json:"enabled,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` - // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. - RecoverableDays *int32 `json:"recoverableDays,omitempty"` - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for storage accounts in the current vault. If it contains 'Purgeable' the storage account can be permanently deleted by a privileged user; otherwise, only the system can purge the storage account, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` -} - -// MarshalJSON is the custom marshaler for StorageAccountAttributes. -func (saa StorageAccountAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if saa.Enabled != nil { - objectMap["enabled"] = saa.Enabled - } - return json.Marshal(objectMap) -} - -// StorageAccountCreateParameters the storage account create parameters. -type StorageAccountCreateParameters struct { - // ResourceID - Storage account resource id. - ResourceID *string `json:"resourceId,omitempty"` - // ActiveKeyName - Current active storage account key name. - ActiveKeyName *string `json:"activeKeyName,omitempty"` - // AutoRegenerateKey - whether keyvault should manage the storage account for the user. - AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` - // RegenerationPeriod - The key regeneration time duration specified in ISO-8601 format. - RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` - // StorageAccountAttributes - The attributes of the storage account. - StorageAccountAttributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for StorageAccountCreateParameters. 
-func (sacp StorageAccountCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sacp.ResourceID != nil { - objectMap["resourceId"] = sacp.ResourceID - } - if sacp.ActiveKeyName != nil { - objectMap["activeKeyName"] = sacp.ActiveKeyName - } - if sacp.AutoRegenerateKey != nil { - objectMap["autoRegenerateKey"] = sacp.AutoRegenerateKey - } - if sacp.RegenerationPeriod != nil { - objectMap["regenerationPeriod"] = sacp.RegenerationPeriod - } - if sacp.StorageAccountAttributes != nil { - objectMap["attributes"] = sacp.StorageAccountAttributes - } - if sacp.Tags != nil { - objectMap["tags"] = sacp.Tags - } - return json.Marshal(objectMap) -} - -// StorageAccountItem the storage account item containing storage account metadata. -type StorageAccountItem struct { - // ID - READ-ONLY; Storage identifier. - ID *string `json:"id,omitempty"` - // ResourceID - READ-ONLY; Storage account resource Id. - ResourceID *string `json:"resourceId,omitempty"` - // Attributes - READ-ONLY; The storage account management attributes. - Attributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for StorageAccountItem. -func (sai StorageAccountItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// StorageAccountRegenerteKeyParameters the storage account key regenerate parameters. -type StorageAccountRegenerteKeyParameters struct { - // KeyName - The storage account key name. - KeyName *string `json:"keyName,omitempty"` -} - -// StorageAccountUpdateParameters the storage account update parameters. -type StorageAccountUpdateParameters struct { - // ActiveKeyName - The current active storage account key name. - ActiveKeyName *string `json:"activeKeyName,omitempty"` - // AutoRegenerateKey - whether keyvault should manage the storage account for the user. - AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` - // RegenerationPeriod - The key regeneration time duration specified in ISO-8601 format. - RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` - // StorageAccountAttributes - The attributes of the storage account. - StorageAccountAttributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for StorageAccountUpdateParameters. -func (saup StorageAccountUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if saup.ActiveKeyName != nil { - objectMap["activeKeyName"] = saup.ActiveKeyName - } - if saup.AutoRegenerateKey != nil { - objectMap["autoRegenerateKey"] = saup.AutoRegenerateKey - } - if saup.RegenerationPeriod != nil { - objectMap["regenerationPeriod"] = saup.RegenerationPeriod - } - if saup.StorageAccountAttributes != nil { - objectMap["attributes"] = saup.StorageAccountAttributes - } - if saup.Tags != nil { - objectMap["tags"] = saup.Tags - } - return json.Marshal(objectMap) -} - -// StorageBundle a Storage account bundle consists of key vault storage account details plus its -// attributes. -type StorageBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The storage account id. - ID *string `json:"id,omitempty"` - // ResourceID - READ-ONLY; The storage account resource id. 
- ResourceID *string `json:"resourceId,omitempty"` - // ActiveKeyName - READ-ONLY; The current active storage account key name. - ActiveKeyName *string `json:"activeKeyName,omitempty"` - // AutoRegenerateKey - READ-ONLY; whether keyvault should manage the storage account for the user. - AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` - // RegenerationPeriod - READ-ONLY; The key regeneration time duration specified in ISO-8601 format. - RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` - // Attributes - READ-ONLY; The storage account attributes. - Attributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for StorageBundle. -func (sb StorageBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// StorageListResult the storage accounts list result. -type StorageListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of storage accounts in the key vault along with a link to the next page of storage accounts. - Value *[]StorageAccountItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of storage accounts. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for StorageListResult. -func (slr StorageListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// StorageListResultIterator provides access to a complete listing of StorageAccountItem values. -type StorageListResultIterator struct { - i int - page StorageListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *StorageListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *StorageListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter StorageListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter StorageListResultIterator) Response() StorageListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. 
-func (iter StorageListResultIterator) Value() StorageAccountItem { - if !iter.page.NotDone() { - return StorageAccountItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the StorageListResultIterator type. -func NewStorageListResultIterator(page StorageListResultPage) StorageListResultIterator { - return StorageListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (slr StorageListResult) IsEmpty() bool { - return slr.Value == nil || len(*slr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (slr StorageListResult) hasNextLink() bool { - return slr.NextLink != nil && len(*slr.NextLink) != 0 -} - -// storageListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (slr StorageListResult) storageListResultPreparer(ctx context.Context) (*http.Request, error) { - if !slr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(slr.NextLink))) -} - -// StorageListResultPage contains a page of StorageAccountItem values. -type StorageListResultPage struct { - fn func(context.Context, StorageListResult) (StorageListResult, error) - slr StorageListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *StorageListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.slr) - if err != nil { - return err - } - page.slr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *StorageListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page StorageListResultPage) NotDone() bool { - return !page.slr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page StorageListResultPage) Response() StorageListResult { - return page.slr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page StorageListResultPage) Values() []StorageAccountItem { - if page.slr.IsEmpty() { - return nil - } - return *page.slr.Value -} - -// Creates a new instance of the StorageListResultPage type. -func NewStorageListResultPage(cur StorageListResult, getNextPage func(context.Context, StorageListResult) (StorageListResult, error)) StorageListResultPage { - return StorageListResultPage{ - fn: getNextPage, - slr: cur, - } -} - -// StorageRestoreParameters the secret restore parameters. -type StorageRestoreParameters struct { - // StorageBundleBackup - The backup blob associated with a storage account. 
(a URL-encoded base64 string) - StorageBundleBackup *string `json:"value,omitempty"` -} - -// SubjectAlternativeNames the subject alternate names of a X509 object. -type SubjectAlternativeNames struct { - // Emails - Email addresses. - Emails *[]string `json:"emails,omitempty"` - // DNSNames - Domain names. - DNSNames *[]string `json:"dns_names,omitempty"` - // Upns - User principal names. - Upns *[]string `json:"upns,omitempty"` -} - -// Trigger a condition to be satisfied for an action to be executed. -type Trigger struct { - // LifetimePercentage - Percentage of lifetime at which to trigger. Value should be between 1 and 99. - LifetimePercentage *int32 `json:"lifetime_percentage,omitempty"` - // DaysBeforeExpiry - Days before expiry to attempt renewal. Value should be between 1 and validity_in_months multiplied by 27. If validity_in_months is 36, then value should be between 1 and 972 (36 * 27). - DaysBeforeExpiry *int32 `json:"days_before_expiry,omitempty"` -} - -// X509CertificateProperties properties of the X509 component of a certificate. -type X509CertificateProperties struct { - // Subject - The subject name. Should be a valid X509 distinguished Name. - Subject *string `json:"subject,omitempty"` - // Ekus - The enhanced key usage. - Ekus *[]string `json:"ekus,omitempty"` - // SubjectAlternativeNames - The subject alternative names. - SubjectAlternativeNames *SubjectAlternativeNames `json:"sans,omitempty"` - // KeyUsage - List of key usages. - KeyUsage *[]KeyUsageType `json:"key_usage,omitempty"` - // ValidityInMonths - The duration that the certificate is valid in months. - ValidityInMonths *int32 `json:"validity_months,omitempty"` -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/version.go deleted file mode 100644 index 60143005f3e..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/version.go +++ /dev/null @@ -1,19 +0,0 @@ -package keyvault - -import "github.com/Azure/azure-sdk-for-go/version" - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/" + Version() + " keyvault/7.1" -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return version.Number -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go deleted file mode 100644 index 86694bd2555..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Package to provides helpers to ease working with pointer values of marshalled structures. -*/ -package to - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// String returns a string value for the passed string pointer. It returns the empty string if the -// pointer is nil. -func String(s *string) string { - if s != nil { - return *s - } - return "" -} - -// StringPtr returns a pointer to the passed string. -func StringPtr(s string) *string { - return &s -} - -// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil -// slice if the pointer is nil. -func StringSlice(s *[]string) []string { - if s != nil { - return *s - } - return nil -} - -// StringSlicePtr returns a pointer to the passed string slice. -func StringSlicePtr(s []string) *[]string { - return &s -} - -// StringMap returns a map of strings built from the map of string pointers. The empty string is -// used for nil pointers. -func StringMap(msp map[string]*string) map[string]string { - ms := make(map[string]string, len(msp)) - for k, sp := range msp { - if sp != nil { - ms[k] = *sp - } else { - ms[k] = "" - } - } - return ms -} - -// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings. -func StringMapPtr(ms map[string]string) *map[string]*string { - msp := make(map[string]*string, len(ms)) - for k, s := range ms { - msp[k] = StringPtr(s) - } - return &msp -} - -// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil. -func Bool(b *bool) bool { - if b != nil { - return *b - } - return false -} - -// BoolPtr returns a pointer to the passed bool. -func BoolPtr(b bool) *bool { - return &b -} - -// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int(i *int) int { - if i != nil { - return *i - } - return 0 -} - -// IntPtr returns a pointer to the passed int. -func IntPtr(i int) *int { - return &i -} - -// Int32 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int32(i *int32) int32 { - if i != nil { - return *i - } - return 0 -} - -// Int32Ptr returns a pointer to the passed int32. -func Int32Ptr(i int32) *int32 { - return &i -} - -// Int64 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int64(i *int64) int64 { - if i != nil { - return *i - } - return 0 -} - -// Int64Ptr returns a pointer to the passed int64. -func Int64Ptr(i int64) *int64 { - return &i -} - -// Float32 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. -func Float32(i *float32) float32 { - if i != nil { - return *i - } - return 0.0 -} - -// Float32Ptr returns a pointer to the passed float32. -func Float32Ptr(i float32) *float32 { - return &i -} - -// Float64 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. -func Float64(i *float64) float64 { - if i != nil { - return *i - } - return 0.0 -} - -// Float64Ptr returns a pointer to the passed float64. -func Float64Ptr(i float64) *float64 { - return &i -} - -// ByteSlicePtr returns a pointer to the passed byte slice. 
-func ByteSlicePtr(b []byte) *[]byte { - return &b -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go deleted file mode 100644 index b7310f6b868..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package to - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/error.go b/vendor/github.com/Azure/go-autorest/autorest/validation/error.go deleted file mode 100644 index fed156dbf6e..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/error.go +++ /dev/null @@ -1,48 +0,0 @@ -package validation - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" -) - -// Error is the type that's returned when the validation of an APIs arguments constraints fails. -type Error struct { - // PackageType is the package type of the object emitting the error. For types, the value - // matches that produced the the '%T' format specifier of the fmt package. For other elements, - // such as functions, it is just the package name (e.g., "autorest"). - PackageType string - - // Method is the name of the method raising the error. - Method string - - // Message is the error message. - Message string -} - -// Error returns a string containing the details of the validation failure. -func (e Error) Error() string { - return fmt.Sprintf("%s#%s: Invalid input: %s", e.PackageType, e.Method, e.Message) -} - -// NewError creates a new Error object with the specified parameters. -// message is treated as a format string to which the optional args apply. 
-func NewError(packageType string, method string, message string, args ...interface{}) Error { - return Error{ - PackageType: packageType, - Method: method, - Message: fmt.Sprintf(message, args...), - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go deleted file mode 100644 index cf1436291a7..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package validation - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go deleted file mode 100644 index ff41cfe0796..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go +++ /dev/null @@ -1,406 +0,0 @@ -/* -Package validation provides methods for validating parameter value using reflection. -*/ -package validation - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "reflect" - "regexp" - "strings" -) - -// Disabled controls if parameter validation should be globally disabled. The default is false. -var Disabled bool - -// Constraint stores constraint name, target field name -// Rule and chain validations. -type Constraint struct { - - // Target field name for validation. - Target string - - // Constraint name e.g. minLength, MaxLength, Pattern, etc. - Name string - - // Rule for constraint e.g. greater than 10, less than 5 etc. - Rule interface{} - - // Chain Validations for struct type - Chain []Constraint -} - -// Validation stores parameter-wise validation. 
-type Validation struct { - TargetValue interface{} - Constraints []Constraint -} - -// Constraint list -const ( - Empty = "Empty" - Null = "Null" - ReadOnly = "ReadOnly" - Pattern = "Pattern" - MaxLength = "MaxLength" - MinLength = "MinLength" - MaxItems = "MaxItems" - MinItems = "MinItems" - MultipleOf = "MultipleOf" - UniqueItems = "UniqueItems" - InclusiveMaximum = "InclusiveMaximum" - ExclusiveMaximum = "ExclusiveMaximum" - ExclusiveMinimum = "ExclusiveMinimum" - InclusiveMinimum = "InclusiveMinimum" -) - -// Validate method validates constraints on parameter -// passed in validation array. -func Validate(m []Validation) error { - if Disabled { - return nil - } - for _, item := range m { - v := reflect.ValueOf(item.TargetValue) - for _, constraint := range item.Constraints { - var err error - switch v.Kind() { - case reflect.Ptr: - err = validatePtr(v, constraint) - case reflect.String: - err = validateString(v, constraint) - case reflect.Struct: - err = validateStruct(v, constraint) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - err = validateInt(v, constraint) - case reflect.Float32, reflect.Float64: - err = validateFloat(v, constraint) - case reflect.Array, reflect.Slice, reflect.Map: - err = validateArrayMap(v, constraint) - default: - err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) - } - - if err != nil { - return err - } - } - } - return nil -} - -func validateStruct(x reflect.Value, v Constraint, name ...string) error { - //Get field name from target name which is in format a.b.c - s := strings.Split(v.Target, ".") - f := x.FieldByName(s[len(s)-1]) - if isZero(f) { - return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) - } - - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(f), - Constraints: []Constraint{v}, - }, - }) -} - -func validatePtr(x reflect.Value, v Constraint) error { - if v.Name == ReadOnly { - if !x.IsNil() { - return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") - } - return nil - } - if x.IsNil() { - return checkNil(x, v) - } - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x.Elem()), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func validateInt(x reflect.Value, v Constraint) error { - i := x.Int() - r, ok := toInt64(v.Rule) - if !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - switch v.Name { - case MultipleOf: - if i%r != 0 { - return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) - } - case ExclusiveMinimum: - if i <= r { - return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) - } - case ExclusiveMaximum: - if i >= r { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case InclusiveMinimum: - if i < r { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case InclusiveMaximum: - if i > r { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name)) - } - return nil -} - -func validateFloat(x reflect.Value, v Constraint) error { - f := x.Float() - r, ok := v.Rule.(float64) - if !ok { - return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule)) - } - switch v.Name { - case ExclusiveMinimum: - if f <= 
r { - return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) - } - case ExclusiveMaximum: - if f >= r { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case InclusiveMinimum: - if f < r { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case InclusiveMaximum: - if f > r { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name)) - } - return nil -} - -func validateString(x reflect.Value, v Constraint) error { - s := x.String() - switch v.Name { - case Empty: - if len(s) == 0 { - return checkEmpty(x, v) - } - case Pattern: - reg, err := regexp.Compile(v.Rule.(string)) - if err != nil { - return createError(x, v, err.Error()) - } - if !reg.MatchString(s) { - return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule)) - } - case MaxLength: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - if len(s) > v.Rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule)) - } - case MinLength: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - if len(s) < v.Rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule)) - } - case ReadOnly: - if len(s) > 0 { - return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name)) - } - - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func validateArrayMap(x reflect.Value, v Constraint) error { - switch v.Name { - case Null: - if x.IsNil() { - return checkNil(x, v) - } - case Empty: - if x.IsNil() || x.Len() == 0 { - return checkEmpty(x, v) - } - case MaxItems: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) - } - if x.Len() > v.Rule.(int) { - return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len())) - } - case MinItems: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) - } - if x.Len() < v.Rule.(int) { - return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len())) - } - case UniqueItems: - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - if !checkForUniqueInArray(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) - } - } else if x.Kind() == reflect.Map { - if !checkForUniqueInMap(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) - } - } else { - return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind())) - } - case ReadOnly: - if x.Len() != 0 { - return createError(x, v, "readonly parameter; must send as nil or empty in request") - } - case Pattern: - reg, err := regexp.Compile(v.Rule.(string)) - if err != nil { - return 
createError(x, v, err.Error()) - } - keys := x.MapKeys() - for _, k := range keys { - if !reg.MatchString(k.String()) { - return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule)) - } - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name)) - } - - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func checkNil(x reflect.Value, v Constraint) error { - if _, ok := v.Rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) - } - if v.Rule.(bool) { - return createError(x, v, "value can not be null; required parameter") - } - return nil -} - -func checkEmpty(x reflect.Value, v Constraint) error { - if _, ok := v.Rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) - } - - if v.Rule.(bool) { - return createError(x, v, "value can not be null or empty; required parameter") - } - return nil -} - -func checkForUniqueInArray(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - arrOfInterface := make([]interface{}, x.Len()) - - for i := 0; i < x.Len(); i++ { - arrOfInterface[i] = x.Index(i).Interface() - } - - m := make(map[interface{}]bool) - for _, val := range arrOfInterface { - if m[val] { - return false - } - m[val] = true - } - return true -} - -func checkForUniqueInMap(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - mapOfInterface := make(map[interface{}]interface{}, x.Len()) - - keys := x.MapKeys() - for _, k := range keys { - mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() - } - - m := make(map[interface{}]bool) - for _, val := range mapOfInterface { - if m[val] { - return false - } - m[val] = true - } - return true -} - -func getInterfaceValue(x reflect.Value) interface{} { - if x.Kind() == reflect.Invalid { - return nil - } - return x.Interface() -} - -func isZero(x interface{}) bool { - return x == reflect.Zero(reflect.TypeOf(x)).Interface() -} - -func createError(x reflect.Value, v Constraint, err string) error { - return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s", - v.Target, v.Name, getInterfaceValue(x), err) -} - -func toInt64(v interface{}) (int64, bool) { - if i64, ok := v.(int64); ok { - return i64, true - } - // older generators emit max constants as int, so if int64 fails fall back to int - if i32, ok := v.(int); ok { - return int64(i32), true - } - return 0, false -} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE new file mode 100644 index 00000000000..3d8b93bc798 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go new file mode 100644 index 00000000000..19210883bac --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package cache allows third parties to implement external storage for caching token data +for distributed systems or multiple local applications access. + +The data stored and extracted will represent the entire cache. Therefore it is recommended +one msal instance per user. This data is considered opaque and there are no guarantees to +implementers on the format being passed. +*/ +package cache + +import "context" + +// Marshaler marshals data from an internal cache to bytes that can be stored. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Unmarshaler unmarshals data from a storage medium into the internal cache, overwriting it. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Serializer can serialize the cache to binary or from binary into the cache. +type Serializer interface { + Marshaler + Unmarshaler +} + +// ExportHints are suggestions for storing data. +type ExportHints struct { + // PartitionKey is a suggested key for partitioning the cache + PartitionKey string +} + +// ReplaceHints are suggestions for loading data. +type ReplaceHints struct { + // PartitionKey is a suggested key for partitioning the cache + PartitionKey string +} + +// ExportReplace exports and replaces in-memory cache data. It doesn't support nil Context or +// define the outcome of passing one. A Context without a timeout must receive a default timeout +// specified by the implementor. Retries must be implemented inside the implementation. +type ExportReplace interface { + // Replace replaces the cache with what is in external storage. Implementors should honor + // Context cancellations and return context.Canceled or context.DeadlineExceeded in those cases. + Replace(ctx context.Context, cache Unmarshaler, hints ReplaceHints) error + // Export writes the binary representation of the cache (cache.Marshal()) to external storage. + // This is considered opaque. Context cancellations should be honored as in Replace. 
+ Export(ctx context.Context, cache Marshaler, hints ExportHints) error +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go new file mode 100644 index 00000000000..6612feb4bf8 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -0,0 +1,685 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package confidential provides a client for authentication of "confidential" applications. +A "confidential" application is defined as an app that run on servers. They are considered +difficult to access and for that reason capable of keeping an application secret. +Confidential clients can hold configuration-time secrets. +*/ +package confidential + +import ( + "context" + "crypto" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +/* +Design note: + +confidential.Client uses base.Client as an embedded type. base.Client statically assigns its attributes +during creation. As it doesn't have any pointers in it, anything borrowed from it, such as +Base.AuthParams is a copy that is free to be manipulated here. + +Duplicate Calls shared between public.Client and this package: +There is some duplicate call options provided here that are the same as in public.Client . This +is a design choices. Go proverb(https://www.youtube.com/watch?v=PAAkCSZUG1c&t=9m28s): +"a little copying is better than a little dependency". Yes, we could have another package with +shared options (fail). That divides like 2 options from all others which makes the user look +through more docs. We can have all clients in one package, but I think separate packages +here makes for better naming (public.Client vs client.PublicClient). So I chose a little +duplication. + +.Net People, Take note on X509: +This uses x509.Certificates and private keys. x509 does not store private keys. .Net +has some x509.Certificate2 thing that has private keys, but that is just some bullcrap that .Net +added, it doesn't exist in real life. As such I've put a PEM decoder into here. +*/ + +// TODO(msal): This should have example code for each method on client using Go's example doc framework. +// base usage details should be include in the package documentation. + +// AuthResult contains the results of one token acquisition operation. +// For details see https://aka.ms/msal-net-authenticationresult +type AuthResult = base.AuthResult + +type Account = shared.Account + +// CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. 
The file +// must contain the public certificate and the private key. If a PEM block is encrypted and +// password is not an empty string, it attempts to decrypt the PEM blocks using the password. +// Multiple certs are due to certificate chaining for use cases like TLS that sign from root to leaf. +func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.PrivateKey, error) { + var certs []*x509.Certificate + var priv crypto.PrivateKey + for { + block, rest := pem.Decode(pemData) + if block == nil { + break + } + + //nolint:staticcheck // x509.IsEncryptedPEMBlock and x509.DecryptPEMBlock are deprecated. They are used here only to support a usecase. + if x509.IsEncryptedPEMBlock(block) { + b, err := x509.DecryptPEMBlock(block, []byte(password)) + if err != nil { + return nil, nil, fmt.Errorf("could not decrypt encrypted PEM block: %v", err) + } + block, _ = pem.Decode(b) + if block == nil { + return nil, nil, fmt.Errorf("encounter encrypted PEM block that did not decode") + } + } + + switch block.Type { + case "CERTIFICATE": + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("block labelled 'CERTIFICATE' could not be parsed by x509: %v", err) + } + certs = append(certs, cert) + case "PRIVATE KEY": + if priv != nil { + return nil, nil, errors.New("found multiple private key blocks") + } + + var err error + priv, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("could not decode private key: %v", err) + } + case "RSA PRIVATE KEY": + if priv != nil { + return nil, nil, errors.New("found multiple private key blocks") + } + var err error + priv, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("could not decode private key: %v", err) + } + } + pemData = rest + } + + if len(certs) == 0 { + return nil, nil, fmt.Errorf("no certificates found") + } + + if priv == nil { + return nil, nil, fmt.Errorf("no private key found") + } + + return certs, priv, nil +} + +// AssertionRequestOptions has required information for client assertion claims +type AssertionRequestOptions = exported.AssertionRequestOptions + +// Credential represents the credential used in confidential client flows. +type Credential struct { + secret string + + cert *x509.Certificate + key crypto.PrivateKey + x5c []string + + assertionCallback func(context.Context, AssertionRequestOptions) (string, error) + + tokenProvider func(context.Context, TokenProviderParameters) (TokenProviderResult, error) +} + +// toInternal returns the accesstokens.Credential that is used internally. The current structure of the +// code requires that client.go, requests.go and confidential.go share a credential type without +// having import recursion. That requires the type used between is in a shared package. Therefore +// we have this. 
+func (c Credential) toInternal() (*accesstokens.Credential, error) { + if c.secret != "" { + return &accesstokens.Credential{Secret: c.secret}, nil + } + if c.cert != nil { + if c.key == nil { + return nil, errors.New("missing private key for certificate") + } + return &accesstokens.Credential{Cert: c.cert, Key: c.key, X5c: c.x5c}, nil + } + if c.key != nil { + return nil, errors.New("missing certificate for private key") + } + if c.assertionCallback != nil { + return &accesstokens.Credential{AssertionCallback: c.assertionCallback}, nil + } + if c.tokenProvider != nil { + return &accesstokens.Credential{TokenProvider: c.tokenProvider}, nil + } + return nil, errors.New("invalid credential") +} + +// NewCredFromSecret creates a Credential from a secret. +func NewCredFromSecret(secret string) (Credential, error) { + if secret == "" { + return Credential{}, errors.New("secret can't be empty string") + } + return Credential{secret: secret}, nil +} + +// NewCredFromAssertionCallback creates a Credential that invokes a callback to get assertions +// authenticating the application. The callback must be thread safe. +func NewCredFromAssertionCallback(callback func(context.Context, AssertionRequestOptions) (string, error)) Credential { + return Credential{assertionCallback: callback} +} + +// NewCredFromCert creates a Credential from a certificate or chain of certificates and an RSA private key +// as returned by [CertFromPEM]. +func NewCredFromCert(certs []*x509.Certificate, key crypto.PrivateKey) (Credential, error) { + cred := Credential{key: key} + k, ok := key.(*rsa.PrivateKey) + if !ok { + return cred, errors.New("key must be an RSA key") + } + for _, cert := range certs { + if cert == nil { + // not returning an error here because certs may still contain a sufficient cert/key pair + continue + } + certKey, ok := cert.PublicKey.(*rsa.PublicKey) + if ok && k.E == certKey.E && k.N.Cmp(certKey.N) == 0 { + // We know this is the signing cert because its public key matches the given private key. + // This cert must be first in x5c. + cred.cert = cert + cred.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cred.x5c...) + } else { + cred.x5c = append(cred.x5c, base64.StdEncoding.EncodeToString(cert.Raw)) + } + } + if cred.cert == nil { + return cred, errors.New("key doesn't match any certificate") + } + return cred, nil +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters = exported.TokenProviderParameters + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult = exported.TokenProviderResult + +// NewCredFromTokenProvider creates a Credential from a function that provides access tokens. The function +// must be concurrency safe. This is intended only to allow the Azure SDK to cache MSI tokens. It isn't +// useful to applications in general because the token provider must implement all authentication logic. +func NewCredFromTokenProvider(provider func(context.Context, TokenProviderParameters) (TokenProviderResult, error)) Credential { + return Credential{tokenProvider: provider} +} + +// AutoDetectRegion instructs MSAL Go to auto detect region for Azure regional token service. +func AutoDetectRegion() string { + return "TryAutoDetect" +} + +// Client is a representation of authentication client for confidential applications as defined in the +// package doc. A new Client should be created PER SERVICE USER. 
+// For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications +type Client struct { + base base.Client + cred *accesstokens.Credential +} + +// clientOptions are optional settings for New(). These options are set using various functions +// returning Option calls. +type clientOptions struct { + accessor cache.ExportReplace + authority, azureRegion string + capabilities []string + disableInstanceDiscovery, sendX5C bool + httpClient ops.HTTPClient +} + +// Option is an optional argument to New(). +type Option func(o *clientOptions) + +// WithCache provides an accessor that will read and write authentication data to an externally managed cache. +func WithCache(accessor cache.ExportReplace) Option { + return func(o *clientOptions) { + o.accessor = accessor + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(o *clientOptions) { + // there's no danger of sharing the slice's underlying memory with the application because + // this slice is simply passed to base.WithClientCapabilities, which copies its data + o.capabilities = capabilities + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) Option { + return func(o *clientOptions) { + o.httpClient = httpClient + } +} + +// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication. +func WithX5C() Option { + return func(o *clientOptions) { + o.sendX5C = true + } +} + +// WithInstanceDiscovery set to false to disable authority validation (to support private cloud scenarios) +func WithInstanceDiscovery(enabled bool) Option { + return func(o *clientOptions) { + o.disableInstanceDiscovery = !enabled + } +} + +// WithAzureRegion sets the region(preferred) or Confidential.AutoDetectRegion() for auto detecting region. +// Region names as per https://azure.microsoft.com/en-ca/global-infrastructure/geographies/. +// See https://aka.ms/region-map for more details on region names. +// The region value should be short region name for the region where the service is deployed. +// For example "centralus" is short name for region Central US. +// Not all auth flows can use the regional token service. +// Service To Service (client credential flow) tokens can be obtained from the regional service. +// Requires configuration at the tenant level. +// Auto-detection works on a limited number of Azure artifacts (VMs, Azure functions). +// If auto-detection fails, the non-regional endpoint will be used. +// If an invalid region name is provided, the non-regional endpoint MIGHT be used or the token request MIGHT fail. +func WithAzureRegion(val string) Option { + return func(o *clientOptions) { + o.azureRegion = val + } +} + +// New is the constructor for Client. authority is the URL of a token authority such as "https://login.microsoftonline.com/". +// If the Client will connect directly to AD FS, use "adfs" for the tenant. clientID is the application's client ID (also called its +// "application ID"). 
+func New(authority, clientID string, cred Credential, options ...Option) (Client, error) { + internalCred, err := cred.toInternal() + if err != nil { + return Client{}, err + } + + opts := clientOptions{ + authority: authority, + // if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache + disableInstanceDiscovery: cred.tokenProvider != nil, + httpClient: shared.DefaultClient, + } + for _, o := range options { + o(&opts) + } + baseOpts := []base.Option{ + base.WithCacheAccessor(opts.accessor), + base.WithClientCapabilities(opts.capabilities), + base.WithInstanceDiscovery(!opts.disableInstanceDiscovery), + base.WithRegionDetection(opts.azureRegion), + base.WithX5C(opts.sendX5C), + } + base, err := base.New(clientID, opts.authority, oauth.New(opts.httpClient), baseOpts...) + if err != nil { + return Client{}, err + } + base.AuthParams.IsConfidentialClient = true + + return Client{base: base, cred: internalCred}, nil +} + +// authCodeURLOptions contains options for AuthCodeURL +type authCodeURLOptions struct { + claims, loginHint, tenantID, domainHint string +} + +// AuthCodeURLOption is implemented by options for AuthCodeURL +type AuthCodeURLOption interface { + authCodeURLOption() +} + +// AuthCodeURL creates a URL used to acquire an authorization code. Users need to call CreateAuthorizationCodeURLParameters and pass it in. +// +// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID] +func (cca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) { + o := authCodeURLOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return "", err + } + ap, err := cca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return "", err + } + ap.Claims = o.claims + ap.LoginHint = o.loginHint + ap.DomainHint = o.domainHint + return cca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap) +} + +// WithLoginHint pre-populates the login prompt with a username. +func WithLoginHint(username string) interface { + AuthCodeURLOption + options.CallOption +} { + return struct { + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.loginHint = username + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url. +func WithDomainHint(domain string) interface { + AuthCodeURLOption + options.CallOption +} { + return struct { + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.domainHint = domain + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithClaims sets additional claims to request for the token, such as those required by conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded. +// This option is valid for any token acquisition method. 
+func WithClaims(claims string) interface { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.claims = claims + case *acquireTokenByCredentialOptions: + t.claims = claims + case *acquireTokenOnBehalfOfOptions: + t.claims = claims + case *acquireTokenSilentOptions: + t.claims = claims + case *authCodeURLOptions: + t.claims = claims + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New]. +// This option is valid for any token acquisition method. +func WithTenantID(tenantID string) interface { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.tenantID = tenantID + case *acquireTokenByCredentialOptions: + t.tenantID = tenantID + case *acquireTokenOnBehalfOfOptions: + t.tenantID = tenantID + case *acquireTokenSilentOptions: + t.tenantID = tenantID + case *authCodeURLOptions: + t.tenantID = tenantID + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// acquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. +// These are set by using various AcquireTokenSilentOption functions. +type acquireTokenSilentOptions struct { + account Account + claims, tenantID string +} + +// AcquireSilentOption is implemented by options for AcquireTokenSilent +type AcquireSilentOption interface { + acquireSilentOption() +} + +// WithSilentAccount uses the passed account during an AcquireTokenSilent() call. +func WithSilentAccount(account Account) interface { + AcquireSilentOption + options.CallOption +} { + return struct { + AcquireSilentOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.account = account + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenSilent acquires a token from either the cache or using a refresh token. 
+// +// Options: [WithClaims], [WithSilentAccount], [WithTenantID] +func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) { + o := acquireTokenSilentOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + if o.claims != "" { + return AuthResult{}, errors.New("call another AcquireToken method to request a new token having these claims") + } + + silentParameters := base.AcquireTokenSilentParameters{ + Scopes: scopes, + Account: o.account, + RequestType: accesstokens.ATConfidential, + Credential: cca.cred, + IsAppCache: o.account.IsZero(), + TenantID: o.tenantID, + } + + return cca.base.AcquireTokenSilent(ctx, silentParameters) +} + +// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow. +type acquireTokenByAuthCodeOptions struct { + challenge, claims, tenantID string +} + +// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode +type AcquireByAuthCodeOption interface { + acquireByAuthCodeOption() +} + +// WithChallenge allows you to provide a challenge for the .AcquireTokenByAuthCode() call. +func WithChallenge(challenge string) interface { + AcquireByAuthCodeOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.challenge = challenge + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code. +// The specified redirect URI must be the same URI that was used when the authorization code was requested. +// +// Options: [WithChallenge], [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) { + o := acquireTokenByAuthCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + params := base.AcquireTokenAuthCodeParameters{ + Scopes: scopes, + Code: code, + Challenge: o.challenge, + Claims: o.claims, + AppType: accesstokens.ATConfidential, + Credential: cca.cred, // This setting differs from public.Client.AcquireTokenByAuthCode + RedirectURI: redirectURI, + TenantID: o.tenantID, + } + + return cca.base.AcquireTokenByAuthCode(ctx, params) +} + +// acquireTokenByCredentialOptions contains optional configuration for AcquireTokenByCredential +type acquireTokenByCredentialOptions struct { + claims, tenantID string +} + +// AcquireByCredentialOption is implemented by options for AcquireTokenByCredential +type AcquireByCredentialOption interface { + acquireByCredOption() +} + +// AcquireTokenByCredential acquires a security token from the authority, using the client credentials grant. 
+// +// Options: [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string, opts ...AcquireByCredentialOption) (AuthResult, error) { + o := acquireTokenByCredentialOptions{} + err := options.ApplyOptions(&o, opts) + if err != nil { + return AuthResult{}, err + } + authParams, err := cca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATClientCredentials + authParams.Claims = o.claims + + token, err := cca.base.Token.Credential(ctx, authParams, cca.cred) + if err != nil { + return AuthResult{}, err + } + return cca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +// acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf +type acquireTokenOnBehalfOfOptions struct { + claims, tenantID string +} + +// AcquireOnBehalfOfOption is implemented by options for AcquireTokenOnBehalfOf +type AcquireOnBehalfOfOption interface { + acquireOBOOption() +} + +// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. +// Refer https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow. +// +// Options: [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, opts ...AcquireOnBehalfOfOption) (AuthResult, error) { + o := acquireTokenOnBehalfOfOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + params := base.AcquireTokenOnBehalfOfParameters{ + Scopes: scopes, + UserAssertion: userAssertion, + Claims: o.claims, + Credential: cca.cred, + TenantID: o.tenantID, + } + return cca.base.AcquireTokenOnBehalfOf(ctx, params) +} + +// Account gets the account in the token cache with the specified homeAccountID. +func (cca Client) Account(ctx context.Context, accountID string) (Account, error) { + return cca.base.Account(ctx, accountID) +} + +// RemoveAccount signs the account out and forgets account from token cache. +func (cca Client) RemoveAccount(ctx context.Context, account Account) error { + return cca.base.RemoveAccount(ctx, account) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md new file mode 100644 index 00000000000..7ef7862fe53 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md @@ -0,0 +1,111 @@ +# MSAL Error Design + +Author: Abhidnya Patil(abhidnya.patil@microsoft.com) + +Contributors: + +- John Doak(jdoak@microsoft.com) +- Keegan Caruso(Keegan.Caruso@microsoft.com) +- Joel Hendrix(jhendrix@microsoft.com) + +## Background + +Errors in MSAL are intended for app developers to troubleshoot and not for displaying to end-users. + +### Go error handling vs other MSAL languages + +Most modern languages use exception based errors. Simply put, you "throw" an exception and it must be caught at some routine in the upper stack or it will eventually crash the program. + +Go doesn't use exceptions, instead it relies on multiple return values, one of which can be the builtin error interface type. It is up to the user to decide what to do. + +### Go custom error types + +Errors can be created in Go by simply using errors.New() or fmt.Errorf() to create an "error". 
+ +Custom errors can be created in multiple ways. One of the more robust ways is simply to satisfy the error interface: + +```go +type MyCustomErr struct { + Msg string +} +func (m MyCustomErr) Error() string { // This implements "error" + return m.Msg +} +``` + +### MSAL Error Goals + +- Provide diagnostics to the user and for tickets that can be used to track down bugs or client misconfigurations +- Detect errors that are transitory and can be retried +- Allow the user to identify certain errors that the program can respond to, such a informing the user for the need to do an enrollment + +## Implementing Client Side Errors + +Client side errors indicate a misconfiguration or passing of bad arguments that is non-recoverable. Retrying isn't possible. + +These errors can simply be standard Go errors created by errors.New() or fmt.Errorf(). If down the line we need a custom error, we can introduce it, but for now the error messages just need to be clear on what the issue was. + +## Implementing Service Side Errors + +Service side errors occur when an external RPC responds either with an HTTP error code or returns a message that includes an error. + +These errors can be transitory (please slow down) or permanent (HTTP 404). To provide our diagnostic goals, we require the ability to differentiate these errors from other errors. + +The current implementation includes a specialized type that captures any error from the server: + +```go +// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the +// http.Request and Response objects. Implements error. +type CallErr struct { + Req *http.Request + Resp *http.Response + Err error +} + +// Errors implements error.Error(). +func (e CallErr) Error() string { + return e.Err.Error() +} + +// Verbose prints a versbose error message with the request or response. +func (e CallErr) Verbose() string { + e.Resp.Request = nil // This brings in a bunch of TLS stuff we don't need + e.Resp.TLS = nil // Same + return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp)) +} +``` + +A user will always receive the most concise error we provide. They can tell if it is a server side error using Go error package: + +```go +var callErr CallErr +if errors.As(err, &callErr) { + ... +} +``` + +We provide a Verbose() function that can retrieve the most verbose message from any error we provide: + +```go +fmt.Println(errors.Verbose(err)) +``` + +If further differentiation is required, we can add custom errors that use Go error wrapping on top of CallErr to achieve our diagnostic goals (such as detecting when to retry a call due to transient errors). + +CallErr is always thrown from the comm package (which handles all http requests) and looks similar to: + +```go +return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, ErrorResponse), //ErrorResponse is the json body extracted from the http response + } +``` + +## Future Decisions + +The ability to retry calls needs to have centralized responsibility. Either the user is doing it or the client is doing it. + +If the user should be responsible, our errors package will include a CanRetry() function that will inform the user if the error provided to them is retryable. This is based on the http error code and possibly the type of error that was returned. 
It would also include a sleep time if the server returned an amount of time to wait. + +Otherwise we will do this internally and retries will be left to us. diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go new file mode 100644 index 00000000000..c9b8dbed088 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go @@ -0,0 +1,89 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package errors + +import ( + "errors" + "fmt" + "io" + "net/http" + "reflect" + "strings" + + "github.com/kylelemons/godebug/pretty" +) + +var prettyConf = &pretty.Config{ + IncludeUnexported: false, + SkipZeroFields: true, + TrackCycles: true, + Formatter: map[reflect.Type]interface{}{ + reflect.TypeOf((*io.Reader)(nil)).Elem(): func(r io.Reader) string { + b, err := io.ReadAll(r) + if err != nil { + return "could not read io.Reader content" + } + return string(b) + }, + }, +} + +type verboser interface { + Verbose() string +} + +// Verbose prints the most verbose error that the error message has. +func Verbose(err error) string { + build := strings.Builder{} + for { + if err == nil { + break + } + if v, ok := err.(verboser); ok { + build.WriteString(v.Verbose()) + } else { + build.WriteString(err.Error()) + } + err = errors.Unwrap(err) + } + return build.String() +} + +// New is equivalent to errors.New(). +func New(text string) error { + return errors.New(text) +} + +// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the +// http.Request and Response objects. Implements error. +type CallErr struct { + Req *http.Request + // Resp contains response body + Resp *http.Response + Err error +} + +// Errors implements error.Error(). +func (e CallErr) Error() string { + return e.Err.Error() +} + +// Verbose prints a versbose error message with the request or response. +func (e CallErr) Verbose() string { + e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need + e.Resp.TLS = nil // Same + return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp)) +} + +// Is reports whether any error in errors chain matches target. +func Is(err, target error) bool { + return errors.Is(err, target) +} + +// As finds the first error in errors chain that matches target, +// and if so, sets target to that error value and returns true. +// Otherwise, it returns false. +func As(err error, target interface{}) bool { + return errors.As(err, target) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go new file mode 100644 index 00000000000..5f68384f68b --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go @@ -0,0 +1,467 @@ +// Package base contains a "Base" client that is used by the external public.Client and confidential.Client. +// Base holds shared attributes that must be available to both clients and methods that act as +// shared calls. 
+package base + +import ( + "context" + "errors" + "fmt" + "net/url" + "reflect" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +const ( + // AuthorityPublicCloud is the default AAD authority host + AuthorityPublicCloud = "https://login.microsoftonline.com/common" + scopeSeparator = " " +) + +// manager provides an internal cache. It is defined to allow faking the cache in tests. +// In production it's a *storage.Manager or *storage.PartitionedManager. +type manager interface { + cache.Serializer + Read(context.Context, authority.AuthParams) (storage.TokenResponse, error) + Write(authority.AuthParams, accesstokens.TokenResponse) (shared.Account, error) +} + +// accountManager is a manager that also caches accounts. In production it's a *storage.Manager. +type accountManager interface { + manager + AllAccounts() []shared.Account + Account(homeAccountID string) shared.Account + RemoveAccount(account shared.Account, clientID string) +} + +// AcquireTokenSilentParameters contains the parameters to acquire a token silently (from cache). +type AcquireTokenSilentParameters struct { + Scopes []string + Account shared.Account + RequestType accesstokens.AppType + Credential *accesstokens.Credential + IsAppCache bool + TenantID string + UserAssertion string + AuthorizationType authority.AuthorizeType + Claims string +} + +// AcquireTokenAuthCodeParameters contains the parameters required to acquire an access token using the auth code flow. +// To use PKCE, set the CodeChallengeParameter. +// Code challenges are used to secure authorization code grants; for more information, visit +// https://tools.ietf.org/html/rfc7636. +type AcquireTokenAuthCodeParameters struct { + Scopes []string + Code string + Challenge string + Claims string + RedirectURI string + AppType accesstokens.AppType + Credential *accesstokens.Credential + TenantID string +} + +type AcquireTokenOnBehalfOfParameters struct { + Scopes []string + Claims string + Credential *accesstokens.Credential + TenantID string + UserAssertion string +} + +// AuthResult contains the results of one token acquisition operation in PublicClientApplication +// or ConfidentialClientApplication. For details see https://aka.ms/msal-net-authenticationresult +type AuthResult struct { + Account shared.Account + IDToken accesstokens.IDToken + AccessToken string + ExpiresOn time.Time + GrantedScopes []string + DeclinedScopes []string +} + +// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache). 
+func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) { + if err := storageTokenResponse.AccessToken.Validate(); err != nil { + return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err) + } + + account := storageTokenResponse.Account + accessToken := storageTokenResponse.AccessToken.Secret + grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator) + + // Checking if there was an ID token in the cache; this will throw an error in the case of confidential client applications. + var idToken accesstokens.IDToken + if !storageTokenResponse.IDToken.IsZero() { + err := idToken.UnmarshalJSON([]byte(storageTokenResponse.IDToken.Secret)) + if err != nil { + return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err) + } + } + return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil +} + +// NewAuthResult creates an AuthResult. +func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Account) (AuthResult, error) { + if len(tokenResponse.DeclinedScopes) > 0 { + return AuthResult{}, fmt.Errorf("token response failed because declined scopes are present: %s", strings.Join(tokenResponse.DeclinedScopes, ",")) + } + return AuthResult{ + Account: account, + IDToken: tokenResponse.IDToken, + AccessToken: tokenResponse.AccessToken, + ExpiresOn: tokenResponse.ExpiresOn.T, + GrantedScopes: tokenResponse.GrantedScopes.Slice, + }, nil +} + +// Client is a base client that provides access to common methods and primatives that +// can be used by multiple clients. +type Client struct { + Token *oauth.Client + manager accountManager // *storage.Manager or fakeManager in tests + // pmanager is a partitioned cache for OBO authentication. *storage.PartitionedManager or fakeManager in tests + pmanager manager + + AuthParams authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New(). + cacheAccessor cache.ExportReplace + cacheAccessorMu *sync.RWMutex +} + +// Option is an optional argument to the New constructor. +type Option func(c *Client) error + +// WithCacheAccessor allows you to set some type of cache for storing authentication tokens. +func WithCacheAccessor(ca cache.ExportReplace) Option { + return func(c *Client) error { + if ca != nil { + c.cacheAccessor = ca + } + return nil + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(c *Client) error { + var err error + if len(capabilities) > 0 { + cc, err := authority.NewClientCapabilities(capabilities) + if err == nil { + c.AuthParams.Capabilities = cc + } + } + return err + } +} + +// WithKnownAuthorityHosts specifies hosts Client shouldn't validate or request metadata for because they're known to the user +func WithKnownAuthorityHosts(hosts []string) Option { + return func(c *Client) error { + cp := make([]string, len(hosts)) + copy(cp, hosts) + c.AuthParams.KnownAuthorityHosts = cp + return nil + } +} + +// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication. 
+func WithX5C(sendX5C bool) Option { + return func(c *Client) error { + c.AuthParams.SendX5C = sendX5C + return nil + } +} + +func WithRegionDetection(region string) Option { + return func(c *Client) error { + c.AuthParams.AuthorityInfo.Region = region + return nil + } +} + +func WithInstanceDiscovery(instanceDiscoveryEnabled bool) Option { + return func(c *Client) error { + c.AuthParams.AuthorityInfo.ValidateAuthority = instanceDiscoveryEnabled + c.AuthParams.AuthorityInfo.InstanceDiscoveryDisabled = !instanceDiscoveryEnabled + return nil + } +} + +// New is the constructor for Base. +func New(clientID string, authorityURI string, token *oauth.Client, options ...Option) (Client, error) { + //By default, validateAuthority is set to true and instanceDiscoveryDisabled is set to false + authInfo, err := authority.NewInfoFromAuthorityURI(authorityURI, true, false) + if err != nil { + return Client{}, err + } + authParams := authority.NewAuthParams(clientID, authInfo) + client := Client{ // Note: Hey, don't even THINK about making Base into *Base. See "design notes" in public.go and confidential.go + Token: token, + AuthParams: authParams, + cacheAccessorMu: &sync.RWMutex{}, + manager: storage.New(token), + pmanager: storage.NewPartitionedManager(token), + } + for _, o := range options { + if err = o(&client); err != nil { + break + } + } + return client, err + +} + +// AuthCodeURL creates a URL used to acquire an authorization code. +func (b Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, authParams authority.AuthParams) (string, error) { + endpoints, err := b.Token.ResolveEndpoints(ctx, authParams.AuthorityInfo, "") + if err != nil { + return "", err + } + + baseURL, err := url.Parse(endpoints.AuthorizationEndpoint) + if err != nil { + return "", err + } + + claims, err := authParams.MergeCapabilitiesAndClaims() + if err != nil { + return "", err + } + + v := url.Values{} + v.Add("client_id", clientID) + v.Add("response_type", "code") + v.Add("redirect_uri", redirectURI) + v.Add("scope", strings.Join(scopes, scopeSeparator)) + if authParams.State != "" { + v.Add("state", authParams.State) + } + if claims != "" { + v.Add("claims", claims) + } + if authParams.CodeChallenge != "" { + v.Add("code_challenge", authParams.CodeChallenge) + } + if authParams.CodeChallengeMethod != "" { + v.Add("code_challenge_method", authParams.CodeChallengeMethod) + } + if authParams.LoginHint != "" { + v.Add("login_hint", authParams.LoginHint) + } + if authParams.Prompt != "" { + v.Add("prompt", authParams.Prompt) + } + if authParams.DomainHint != "" { + v.Add("domain_hint", authParams.DomainHint) + } + // There were left over from an implementation that didn't use any of these. We may + // need to add them later, but as of now aren't needed. 
+ /* + if p.ResponseMode != "" { + urlParams.Add("response_mode", p.ResponseMode) + } + */ + baseURL.RawQuery = v.Encode() + return baseURL.String(), nil +} + +func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilentParameters) (AuthResult, error) { + ar := AuthResult{} + // when tenant == "", the caller didn't specify a tenant and WithTenant will choose the client's configured tenant + tenant := silent.TenantID + authParams, err := b.AuthParams.WithTenant(tenant) + if err != nil { + return ar, err + } + authParams.Scopes = silent.Scopes + authParams.HomeAccountID = silent.Account.HomeAccountID + authParams.AuthorizationType = silent.AuthorizationType + authParams.Claims = silent.Claims + authParams.UserAssertion = silent.UserAssertion + + m := b.pmanager + if authParams.AuthorizationType != authority.ATOnBehalfOf { + authParams.AuthorizationType = authority.ATRefreshToken + m = b.manager + } + if b.cacheAccessor != nil { + key := authParams.CacheKey(silent.IsAppCache) + b.cacheAccessorMu.RLock() + err = b.cacheAccessor.Replace(ctx, m, cache.ReplaceHints{PartitionKey: key}) + b.cacheAccessorMu.RUnlock() + } + if err != nil { + return ar, err + } + storageTokenResponse, err := m.Read(ctx, authParams) + if err != nil { + return ar, err + } + + // ignore cached access tokens when given claims + if silent.Claims == "" { + ar, err = AuthResultFromStorage(storageTokenResponse) + if err == nil { + return ar, err + } + } + + // redeem a cached refresh token, if available + if reflect.ValueOf(storageTokenResponse.RefreshToken).IsZero() { + return ar, errors.New("no token found") + } + var cc *accesstokens.Credential + if silent.RequestType == accesstokens.ATConfidential { + cc = silent.Credential + } + token, err := b.Token.Refresh(ctx, silent.RequestType, authParams, cc, storageTokenResponse.RefreshToken) + if err != nil { + return ar, err + } + return b.AuthResultFromToken(ctx, authParams, token, true) +} + +func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) { + authParams, err := b.AuthParams.WithTenant(authCodeParams.TenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Claims = authCodeParams.Claims + authParams.Scopes = authCodeParams.Scopes + authParams.Redirecturi = authCodeParams.RedirectURI + authParams.AuthorizationType = authority.ATAuthCode + + var cc *accesstokens.Credential + if authCodeParams.AppType == accesstokens.ATConfidential { + cc = authCodeParams.Credential + authParams.IsConfidentialClient = true + } + + req, err := accesstokens.NewCodeChallengeRequest(authParams, authCodeParams.AppType, cc, authCodeParams.Code, authCodeParams.Challenge) + if err != nil { + return AuthResult{}, err + } + + token, err := b.Token.AuthCode(ctx, req) + if err != nil { + return AuthResult{}, err + } + + return b.AuthResultFromToken(ctx, authParams, token, true) +} + +// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. 
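Editor's note: AcquireTokenAuthCodeParameters and AuthCodeURL above carry the PKCE pieces (Challenge, code_challenge, code_challenge_method); deriving those values is the caller's job. The sketch below is a generic RFC 7636 S256 pair generator, not MSAL API, and the function name is illustrative.

```go
package pkce

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
)

// NewVerifierAndChallenge returns a random PKCE code verifier and its S256 code
// challenge. Per RFC 7636, the challenge is sent in the authorization request
// (code_challenge with code_challenge_method=S256) and the verifier is presented
// later, when the authorization code is redeemed.
func NewVerifierAndChallenge() (verifier, challenge string, err error) {
	buf := make([]byte, 32)
	if _, err = rand.Read(buf); err != nil {
		return "", "", err
	}
	verifier = base64.RawURLEncoding.EncodeToString(buf)
	sum := sha256.Sum256([]byte(verifier))
	challenge = base64.RawURLEncoding.EncodeToString(sum[:])
	return verifier, challenge, nil
}
```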
+func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams AcquireTokenOnBehalfOfParameters) (AuthResult, error) { + var ar AuthResult + silentParameters := AcquireTokenSilentParameters{ + Scopes: onBehalfOfParams.Scopes, + RequestType: accesstokens.ATConfidential, + Credential: onBehalfOfParams.Credential, + UserAssertion: onBehalfOfParams.UserAssertion, + AuthorizationType: authority.ATOnBehalfOf, + TenantID: onBehalfOfParams.TenantID, + Claims: onBehalfOfParams.Claims, + } + ar, err := b.AcquireTokenSilent(ctx, silentParameters) + if err == nil { + return ar, err + } + authParams, err := b.AuthParams.WithTenant(onBehalfOfParams.TenantID) + if err != nil { + return AuthResult{}, err + } + authParams.AuthorizationType = authority.ATOnBehalfOf + authParams.Claims = onBehalfOfParams.Claims + authParams.Scopes = onBehalfOfParams.Scopes + authParams.UserAssertion = onBehalfOfParams.UserAssertion + token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential) + if err == nil { + ar, err = b.AuthResultFromToken(ctx, authParams, token, true) + } + return ar, err +} + +func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) { + if !cacheWrite { + return NewAuthResult(token, shared.Account{}) + } + var m manager = b.manager + if authParams.AuthorizationType == authority.ATOnBehalfOf { + m = b.pmanager + } + key := token.CacheKey(authParams) + if b.cacheAccessor != nil { + b.cacheAccessorMu.Lock() + defer b.cacheAccessorMu.Unlock() + err := b.cacheAccessor.Replace(ctx, m, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return AuthResult{}, err + } + } + account, err := m.Write(authParams, token) + if err != nil { + return AuthResult{}, err + } + ar, err := NewAuthResult(token, account) + if err == nil && b.cacheAccessor != nil { + err = b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key}) + } + return ar, err +} + +func (b Client) AllAccounts(ctx context.Context) ([]shared.Account, error) { + if b.cacheAccessor != nil { + b.cacheAccessorMu.RLock() + defer b.cacheAccessorMu.RUnlock() + key := b.AuthParams.CacheKey(false) + err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return nil, err + } + } + return b.manager.AllAccounts(), nil +} + +func (b Client) Account(ctx context.Context, homeAccountID string) (shared.Account, error) { + if b.cacheAccessor != nil { + b.cacheAccessorMu.RLock() + defer b.cacheAccessorMu.RUnlock() + authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer. + authParams.AuthorizationType = authority.AccountByID + authParams.HomeAccountID = homeAccountID + key := b.AuthParams.CacheKey(false) + err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return shared.Account{}, err + } + } + return b.manager.Account(homeAccountID), nil +} + +// RemoveAccount removes all the ATs, RTs and IDTs from the cache associated with this account. 
+func (b Client) RemoveAccount(ctx context.Context, account shared.Account) error {
+	if b.cacheAccessor == nil {
+		b.manager.RemoveAccount(account, b.AuthParams.ClientID)
+		return nil
+	}
+	b.cacheAccessorMu.Lock()
+	defer b.cacheAccessorMu.Unlock()
+	key := b.AuthParams.CacheKey(false)
+	err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key})
+	if err != nil {
+		return err
+	}
+	b.manager.RemoveAccount(account, b.AuthParams.ClientID)
+	return b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key})
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
new file mode 100644
index 00000000000..548c2faebf9
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
@@ -0,0 +1,200 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+
+	internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+// Contract is the JSON structure that is written to any storage medium when serializing
+// the internal cache. This design is shared between MSAL versions in many languages.
+// This cannot be changed without design that includes other SDKs.
+type Contract struct {
+	AccessTokens  map[string]AccessToken               `json:"AccessToken,omitempty"`
+	RefreshTokens map[string]accesstokens.RefreshToken `json:"RefreshToken,omitempty"`
+	IDTokens      map[string]IDToken                   `json:"IdToken,omitempty"`
+	Accounts      map[string]shared.Account            `json:"Account,omitempty"`
+	AppMetaData   map[string]AppMetaData               `json:"AppMetadata,omitempty"`
+
+	AdditionalFields map[string]interface{}
+}
+
+// InMemoryContract is the in-memory representation of the cache used by the
+// PartitionedManager, with tokens, accounts and app metadata partitioned by key.
+type InMemoryContract struct {
+	AccessTokensPartition  map[string]map[string]AccessToken
+	RefreshTokensPartition map[string]map[string]accesstokens.RefreshToken
+	IDTokensPartition      map[string]map[string]IDToken
+	AccountsPartition      map[string]map[string]shared.Account
+	AppMetaData            map[string]AppMetaData
+}
+
+// NewInMemoryContract is the constructor for InMemoryContract.
+func NewInMemoryContract() *InMemoryContract {
+	return &InMemoryContract{
+		AccessTokensPartition:  map[string]map[string]AccessToken{},
+		RefreshTokensPartition: map[string]map[string]accesstokens.RefreshToken{},
+		IDTokensPartition:      map[string]map[string]IDToken{},
+		AccountsPartition:      map[string]map[string]shared.Account{},
+		AppMetaData:            map[string]AppMetaData{},
+	}
+}
+
+// NewContract is the constructor for Contract.
+func NewContract() *Contract { + return &Contract{ + AccessTokens: map[string]AccessToken{}, + RefreshTokens: map[string]accesstokens.RefreshToken{}, + IDTokens: map[string]IDToken{}, + Accounts: map[string]shared.Account{}, + AppMetaData: map[string]AppMetaData{}, + AdditionalFields: map[string]interface{}{}, + } +} + +// AccessToken is the JSON representation of a MSAL access token for encoding to storage. +type AccessToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + Scopes string `json:"target,omitempty"` + ExpiresOn internalTime.Unix `json:"expires_on,omitempty"` + ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"` + CachedAt internalTime.Unix `json:"cached_at,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccessToken is the constructor for AccessToken. +func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token string) AccessToken { + return AccessToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "AccessToken", + ClientID: clientID, + Secret: token, + Scopes: scopes, + CachedAt: internalTime.Unix{T: cachedAt.UTC()}, + ExpiresOn: internalTime.Unix{T: expiresOn.UTC()}, + ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()}, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (a AccessToken) Key() string { + return strings.Join( + []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}, + shared.CacheKeySeparator, + ) +} + +// FakeValidate enables tests to fake access token validation +var FakeValidate func(AccessToken) error + +// Validate validates that this AccessToken can be used. +func (a AccessToken) Validate() error { + if FakeValidate != nil { + return FakeValidate(a) + } + if a.CachedAt.T.After(time.Now()) { + return errors.New("access token isn't valid, it was cached at a future time") + } + if a.ExpiresOn.T.Before(time.Now().Add(5 * time.Minute)) { + return fmt.Errorf("access token is expired") + } + if a.CachedAt.T.IsZero() { + return fmt.Errorf("access token does not have CachedAt set") + } + return nil +} + +// IDToken is the JSON representation of an MSAL id token for encoding to storage. +type IDToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + AdditionalFields map[string]interface{} +} + +// IsZero determines if IDToken is the zero value. +func (i IDToken) IsZero() bool { + v := reflect.ValueOf(i) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// NewIDToken is the constructor for IDToken. 
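Editor's note: Validate above refuses tokens whose CachedAt is unset or in the future and, more importantly, tokens that are already expired or will expire within five minutes. The same freshness rule in isolation, as an illustrative sketch; usable is a hypothetical helper, not part of this package.

```go
package tokencheck

import "time"

// usable reports whether a cached access token should still be served, applying the
// same CachedAt sanity checks and five-minute expiry buffer as storage.AccessToken.Validate.
func usable(cachedAt, expiresOn, now time.Time) bool {
	switch {
	case cachedAt.IsZero():
		return false // token was never stamped with a cache time
	case cachedAt.After(now):
		return false // cached "in the future" implies clock skew or corruption
	case expiresOn.Before(now.Add(5 * time.Minute)):
		return false // expired, or close enough to expiry that it shouldn't be used
	}
	return true
}
```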
+func NewIDToken(homeID, env, realm, clientID, idToken string) IDToken { + return IDToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "IDToken", + ClientID: clientID, + Secret: idToken, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (id IDToken) Key() string { + return strings.Join( + []string{id.HomeAccountID, id.Environment, id.CredentialType, id.ClientID, id.Realm}, + shared.CacheKeySeparator, + ) +} + +// AppMetaData is the JSON representation of application metadata for encoding to storage. +type AppMetaData struct { + FamilyID string `json:"family_id,omitempty"` + ClientID string `json:"client_id,omitempty"` + Environment string `json:"environment,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAppMetaData is the constructor for AppMetaData. +func NewAppMetaData(familyID, clientID, environment string) AppMetaData { + return AppMetaData{ + FamilyID: familyID, + ClientID: clientID, + Environment: environment, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (a AppMetaData) Key() string { + return strings.Join( + []string{"AppMetaData", a.Environment, a.ClientID}, + shared.CacheKeySeparator, + ) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go new file mode 100644 index 00000000000..87d7d797b3e --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go @@ -0,0 +1,436 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// PartitionedManager is a partitioned in-memory cache of access tokens, accounts and meta data. +type PartitionedManager struct { + contract *InMemoryContract + contractMu sync.RWMutex + requests aadInstanceDiscoveryer // *oauth.Token + + aadCacheMu sync.RWMutex + aadCache map[string]authority.InstanceDiscoveryMetadata +} + +// NewPartitionedManager is the constructor for PartitionedManager. +func NewPartitionedManager(requests *oauth.Client) *PartitionedManager { + m := &PartitionedManager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)} + m.contract = NewInMemoryContract() + return m +} + +// Read reads a storage token from the cache if it exists. 
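Editor's note: the PartitionedManager keeps a separate map per partition. As the getPartitionKey helpers later in this file show, access and refresh tokens written during an on-behalf-of flow are partitioned by the hash of the incoming user assertion, otherwise by the account's home account ID; ID tokens and accounts always use the home account ID. A compact restatement of that rule, with an illustrative function name:

```go
package partition

// partitionKey mirrors the access/refresh token partitioning used by PartitionedManager:
// prefer the user assertion hash (the on-behalf-of case), fall back to the home account ID.
func partitionKey(userAssertionHash, homeAccountID string) string {
	if userAssertionHash != "" {
		return userAssertionHash
	}
	return homeAccountID
}
```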
+func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + tr := TokenResponse{} + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + + // fetch metadata if instanceDiscovery is enabled + aliases := []string{authParameters.AuthorityInfo.Host} + if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases + } + + userAssertionHash := authParameters.AssertionHash() + partitionKeyFromRequest := userAssertionHash + + // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating + // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token. + accessToken, err := m.readAccessToken(aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest) + if err == nil { + tr.AccessToken = accessToken + } + idToken, err := m.readIDToken(aliases, realm, clientID, userAssertionHash, getPartitionKeyIDTokenRead(accessToken)) + if err == nil { + tr.IDToken = idToken + } + + if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil { + // we need the family ID to identify the correct refresh token, if any + familyID := appMetadata.FamilyID + refreshToken, err := m.readRefreshToken(aliases, familyID, clientID, userAssertionHash, partitionKeyFromRequest) + if err == nil { + tr.RefreshToken = refreshToken + } + } + + account, err := m.readAccount(aliases, realm, userAssertionHash, idToken.HomeAccountID) + if err == nil { + tr.Account = account + } + return tr, nil +} + +// Write writes a token response to the cache and returns the account information the token is stored with. +func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + userAssertionHash := authParameters.AssertionHash() + cachedAt := time.Now() + + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + refreshToken.UserAssertionHash = userAssertionHash + } + if err := m.writeRefreshToken(refreshToken, getPartitionKeyRefreshToken(refreshToken)); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + accessToken.UserAssertionHash = userAssertionHash // get Hash method on this + } + + // Since we have a valid access token, cache it before moving on. 
+ if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken, getPartitionKeyAccessToken(accessToken)); err != nil { + return account, err + } + } else { + return shared.Account{}, err + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + idToken.UserAssertionHash = userAssertionHash + } + if err := m.writeIDToken(idToken, getPartitionKeyIDToken(idToken)); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + preferredUsername := idTokenJwt.UPN + if idTokenJwt.PreferredUsername != "" { + preferredUsername = idTokenJwt.PreferredUsername + } + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + preferredUsername, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + account.UserAssertionHash = userAssertionHash + } + if err := m.writeAccount(account, getPartitionKeyAccount(account)); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *PartitionedManager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *PartitionedManager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *PartitionedManager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey string) (AccessToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + if accessTokens, ok := m.contract.AccessTokensPartition[partitionKey]; ok { + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. 
+ for _, at := range accessTokens { + if at.Realm == realm && at.ClientID == clientID && at.UserAssertionHash == userAssertionHash { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at, nil + } + } + } + } + } + return AccessToken{}, fmt.Errorf("access token not found") +} + +func (m *PartitionedManager) writeAccessToken(accessToken AccessToken, partitionKey string) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + if m.contract.AccessTokensPartition[partitionKey] == nil { + m.contract.AccessTokensPartition[partitionKey] = make(map[string]AccessToken) + } + m.contract.AccessTokensPartition[partitionKey][key] = accessToken + return nil +} + +func matchFamilyRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string, clientID string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *PartitionedManager) readRefreshToken(envAliases []string, familyID, clientID, userAssertionHash, partitionKey string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshTokenObo(rt, userAssertionHash, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshTokenObo(rt, userAssertionHash, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // TODO(keegan): All the tests here pass, but Bogdan says this is + // more complicated. I'm opening an issue for this to have him + // review the tests and suggest tests that would break this so + // we can re-write against good tests. His comments as follow: + // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is. + // The algorithm is: + // If application is NOT part of the family, search by client_ID + // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95 + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, matcher := range matchers { + for _, rt := range m.contract.RefreshTokensPartition[partitionKey] { + if matcher(rt) { + return rt, nil + } + } + } + + return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found") +} + +func (m *PartitionedManager) writeRefreshToken(refreshToken accesstokens.RefreshToken, partitionKey string) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := refreshToken.Key() + if m.contract.AccessTokensPartition[partitionKey] == nil { + m.contract.RefreshTokensPartition[partitionKey] = make(map[string]accesstokens.RefreshToken) + } + m.contract.RefreshTokensPartition[partitionKey][key] = refreshToken + return nil +} + +func (m *PartitionedManager) readIDToken(envAliases []string, realm, clientID, userAssertionHash, partitionKey string) (IDToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, idt := range m.contract.IDTokensPartition[partitionKey] { + if idt.Realm == realm && idt.ClientID == clientID && idt.UserAssertionHash == userAssertionHash { + if checkAlias(idt.Environment, envAliases) { + return idt, nil + } + } + } + return IDToken{}, fmt.Errorf("token not found") +} + +func (m *PartitionedManager) writeIDToken(idToken IDToken, partitionKey string) error { + key := idToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + if m.contract.IDTokensPartition[partitionKey] == nil { + m.contract.IDTokensPartition[partitionKey] = make(map[string]IDToken) + } + m.contract.IDTokensPartition[partitionKey][key] = idToken + return nil +} + +func (m *PartitionedManager) readAccount(envAliases []string, realm, UserAssertionHash, partitionKey string) (shared.Account, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key. + // We only use a map because the storage contract shared between all language implementations says use a map. + // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing + // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup + // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored + // is really low (say 2). Each hash is more expensive than the entire iteration. 
+ for _, acc := range m.contract.AccountsPartition[partitionKey] { + if checkAlias(acc.Environment, envAliases) && acc.UserAssertionHash == UserAssertionHash && acc.Realm == realm { + return acc, nil + } + } + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *PartitionedManager) writeAccount(account shared.Account, partitionKey string) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + if m.contract.AccountsPartition[partitionKey] == nil { + m.contract.AccountsPartition[partitionKey] = make(map[string]shared.Account) + } + m.contract.AccountsPartition[partitionKey][key] = account + return nil +} + +func (m *PartitionedManager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + return app, nil + } + } + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *PartitionedManager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *PartitionedManager) update(cache *InMemoryContract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. +func (m *PartitionedManager) Marshal() ([]byte, error) { + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *PartitionedManager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewInMemoryContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} + +func getPartitionKeyAccessToken(item AccessToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyRefreshToken(item accesstokens.RefreshToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyIDToken(item IDToken) string { + return item.HomeAccountID +} + +func getPartitionKeyAccount(item shared.Account) string { + return item.HomeAccountID +} + +func getPartitionKeyIDTokenRead(item AccessToken) string { + return item.HomeAccountID +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go new file mode 100644 index 00000000000..add7519252d --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -0,0 +1,517 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package storage holds all cached token information for MSAL. This storage can be +// augmented with third-party extensions to provide persistent storage. In that case, +// reads and writes in upper packages will call Marshal() to take the entire in-memory +// representation and write it to storage and Unmarshal() to update the entire in-memory +// storage with what was in the persistent storage. 
The persistent storage can only be +// accessed in this way because multiple MSAL clients written in multiple languages can +// access the same storage and must adhere to the same method that was defined +// previously. +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// aadInstanceDiscoveryer allows faking in tests. +// It is implemented in production by ops/authority.Client +type aadInstanceDiscoveryer interface { + AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) +} + +// TokenResponse mimics a token response that was pulled from the cache. +type TokenResponse struct { + RefreshToken accesstokens.RefreshToken + IDToken IDToken // *Credential + AccessToken AccessToken + Account shared.Account +} + +// Manager is an in-memory cache of access tokens, accounts and meta data. This data is +// updated on read/write calls. Unmarshal() replaces all data stored here with whatever +// was given to it on each call. +type Manager struct { + contract *Contract + contractMu sync.RWMutex + requests aadInstanceDiscoveryer // *oauth.Token + + aadCacheMu sync.RWMutex + aadCache map[string]authority.InstanceDiscoveryMetadata +} + +// New is the constructor for Manager. +func New(requests *oauth.Client) *Manager { + m := &Manager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)} + m.contract = NewContract() + return m +} + +func checkAlias(alias string, aliases []string) bool { + for _, v := range aliases { + if alias == v { + return true + } + } + return false +} + +func isMatchingScopes(scopesOne []string, scopesTwo string) bool { + newScopesTwo := strings.Split(scopesTwo, scopeSeparator) + scopeCounter := 0 + for _, scope := range scopesOne { + for _, otherScope := range newScopesTwo { + if strings.EqualFold(scope, otherScope) { + scopeCounter++ + continue + } + } + } + return scopeCounter == len(scopesOne) +} + +// Read reads a storage token from the cache if it exists. +func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + tr := TokenResponse{} + homeAccountID := authParameters.HomeAccountID + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + + // fetch metadata if instanceDiscovery is enabled + aliases := []string{authParameters.AuthorityInfo.Host} + if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases + } + + accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes) + tr.AccessToken = accessToken + + if homeAccountID == "" { + // caller didn't specify a user, so there's no reason to search for an ID or refresh token + return tr, nil + } + // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating + // TokenResponse fields so that e.g. 
lack of an ID token doesn't prevent the caller from receiving a refresh token. + idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID) + if err == nil { + tr.IDToken = idToken + } + + if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil { + // we need the family ID to identify the correct refresh token, if any + familyID := appMetadata.FamilyID + refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID) + if err == nil { + tr.RefreshToken = refreshToken + } + } + + account, err := m.readAccount(homeAccountID, aliases, realm) + if err == nil { + tr.Account = account + } + return tr, nil +} + +const scopeSeparator = " " + +// Write writes a token response to the cache and returns the account information the token is stored with. +func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + cachedAt := time.Now() + + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if err := m.writeRefreshToken(refreshToken); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + ) + + // Since we have a valid access token, cache it before moving on. 
+ if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken); err != nil { + return account, err + } + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if err := m.writeIDToken(idToken); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + preferredUsername := idTokenJwt.UPN + if idTokenJwt.PreferredUsername != "" { + preferredUsername = idTokenJwt.PreferredUsername + } + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + preferredUsername, + ) + if err := m.writeAccount(account); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *Manager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) AccessToken { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. 
+ for _, at := range m.contract.AccessTokens { + if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at + } + } + } + } + return AccessToken{} +} + +func (m *Manager) writeAccessToken(accessToken AccessToken) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + m.contract.AccessTokens[key] = accessToken + return nil +} + +func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, clientID string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshToken(rt, homeID, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshToken(rt, homeID, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // TODO(keegan): All the tests here pass, but Bogdan says this is + // more complicated. I'm opening an issue for this to have him + // review the tests and suggest tests that would break this so + // we can re-write against good tests. His comments as follow: + // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is. + // The algorithm is: + // If application is NOT part of the family, search by client_ID + // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95 + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, matcher := range matchers { + for _, rt := range m.contract.RefreshTokens { + if matcher(rt) { + return rt, nil + } + } + } + + return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found") +} + +func matchFamilyRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string, clientID string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) error { + key := refreshToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.RefreshTokens[key] = refreshToken + return nil +} + +func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, idt := range m.contract.IDTokens { + if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID { + if checkAlias(idt.Environment, envAliases) { + return idt, nil + } + } + } + return IDToken{}, fmt.Errorf("token not found") +} + +func (m *Manager) writeIDToken(idToken IDToken) error { + key := idToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.IDTokens[key] = idToken + return nil +} + +func (m *Manager) AllAccounts() []shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + var accounts []shared.Account + for _, v := range m.contract.Accounts { + accounts = append(accounts, v) + } + + return accounts +} + +func (m *Manager) Account(homeAccountID string) shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, v := range m.contract.Accounts { + if v.HomeAccountID == homeAccountID { + return v + } + } + + return shared.Account{} +} + +func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key. + // We only use a map because the storage contract shared between all language implementations says use a map. + // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing + // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup + // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored + // is really low (say 2). Each hash is more expensive than the entire iteration. 
+ for _, acc := range m.contract.Accounts { + if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm { + return acc, nil + } + } + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *Manager) writeAccount(account shared.Account) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.Accounts[key] = account + return nil +} + +func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + return app, nil + } + } + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *Manager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// RemoveAccount removes all the associated ATs, RTs and IDTs from the cache associated with this account. +func (m *Manager) RemoveAccount(account shared.Account, clientID string) { + m.removeRefreshTokens(account.HomeAccountID, account.Environment, clientID) + m.removeAccessTokens(account.HomeAccountID, account.Environment) + m.removeIDTokens(account.HomeAccountID, account.Environment) + m.removeAccounts(account.HomeAccountID, account.Environment) +} + +func (m *Manager) removeRefreshTokens(homeID string, env string, clientID string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, rt := range m.contract.RefreshTokens { + // Check for RTs associated with the account. + if rt.HomeAccountID == homeID && rt.Environment == env { + // Do RT's app ownership check as a precaution, in case family apps + // and 3rd-party apps share same token cache, although they should not. + if rt.ClientID == clientID || rt.FamilyID != "" { + delete(m.contract.RefreshTokens, key) + } + } + } +} + +func (m *Manager) removeAccessTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, at := range m.contract.AccessTokens { + // Remove AT's associated with the account + if at.HomeAccountID == homeID && at.Environment == env { + // # To avoid the complexity of locating sibling family app's AT, we skip AT's app ownership check. + // It means ATs for other apps will also be removed, it is OK because: + // non-family apps are not supposed to share token cache to begin with; + // Even if it happens, we keep other app's RT already, so SSO still works. + delete(m.contract.AccessTokens, key) + } + } +} + +func (m *Manager) removeIDTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, idt := range m.contract.IDTokens { + // Remove ID tokens associated with the account. + if idt.HomeAccountID == homeID && idt.Environment == env { + delete(m.contract.IDTokens, key) + } + } +} + +func (m *Manager) removeAccounts(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, acc := range m.contract.Accounts { + // Remove the specified account. + if acc.HomeAccountID == homeID && acc.Environment == env { + delete(m.contract.Accounts, key) + } + } +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *Manager) update(cache *Contract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. 
+func (m *Manager) Marshal() ([]byte, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *Manager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json new file mode 100644 index 00000000000..1d8181924d1 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json @@ -0,0 +1,56 @@ +{ + "Account": { + "uid.utid-login.windows.net-contoso": { + "username": "John Doe", + "local_account_id": "object1234", + "realm": "contoso", + "environment": "login.windows.net", + "home_account_id": "uid.utid", + "authority_type": "MSSTS" + } + }, + "RefreshToken": { + "uid.utid-login.windows.net-refreshtoken-my_client_id--s2 s1 s3": { + "target": "s2 s1 s3", + "environment": "login.windows.net", + "credential_type": "RefreshToken", + "secret": "a refresh token", + "client_id": "my_client_id", + "home_account_id": "uid.utid" + } + }, + "AccessToken": { + "an-entry": { + "foo": "bar" + }, + "uid.utid-login.windows.net-accesstoken-my_client_id-contoso-s2 s1 s3": { + "environment": "login.windows.net", + "credential_type": "AccessToken", + "secret": "an access token", + "realm": "contoso", + "target": "s2 s1 s3", + "client_id": "my_client_id", + "cached_at": "1000", + "home_account_id": "uid.utid", + "extended_expires_on": "4600", + "expires_on": "4600" + } + }, + "IdToken": { + "uid.utid-login.windows.net-idtoken-my_client_id-contoso-": { + "realm": "contoso", + "environment": "login.windows.net", + "credential_type": "IdToken", + "secret": "header.eyJvaWQiOiAib2JqZWN0MTIzNCIsICJwcmVmZXJyZWRfdXNlcm5hbWUiOiAiSm9obiBEb2UiLCAic3ViIjogInN1YiJ9.signature", + "client_id": "my_client_id", + "home_account_id": "uid.utid" + } + }, + "unknownEntity": {"field1":"1","field2":"whats"}, + "AppMetadata": { + "AppMetadata-login.windows.net-my_client_id": { + "environment": "login.windows.net", + "client_id": "my_client_id" + } + } + } \ No newline at end of file diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go new file mode 100644 index 00000000000..7b673e3fe12 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// package exported contains internal types that are re-exported from a public package +package exported + +// AssertionRequestOptions has information required to generate a client assertion +type AssertionRequestOptions struct { + // ClientID identifies the application for which an assertion is requested. Used as the assertion's "iss" and "sub" claims. + ClientID string + + // TokenEndpoint is the intended token endpoint. Used as the assertion's "aud" claim. 
+ TokenEndpoint string +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters struct { + // Claims contains any additional claims requested for the token + Claims string + // CorrelationID of the authentication request + CorrelationID string + // Scopes requested for the token + Scopes []string + // TenantID identifies the tenant in which to authenticate + TenantID string +} + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult struct { + // AccessToken is the requested token + AccessToken string + // ExpiresInSeconds is the lifetime of the token in seconds + ExpiresInSeconds int +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md new file mode 100644 index 00000000000..09edb01b7e4 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md @@ -0,0 +1,140 @@ +# JSON Package Design +Author: John Doak(jdoak@microsoft.com) + +## Why? + +This project needs a special type of marshal/unmarshal not directly supported +by the encoding/json package. + +The need revolves around a few key wants/needs: +- unmarshal and marshal structs representing JSON messages +- fields in the messgage not in the struct must be maintained when unmarshalled +- those same fields must be marshalled back when encoded again + +The initial version used map[string]interface{} to put in the keys that +were known and then any other keys were put into a field called AdditionalFields. + +This has a few negatives: +- Dual marshaling/unmarshalling is required +- Adding a struct field requires manually adding a key by name to be encoded/decoded from the map (which is a loosely coupled construct), which can lead to bugs that aren't detected or have bad side effects +- Tests can become quickly disconnected if those keys aren't put +in tests as well. So you think you have support working, but you +don't. Existing tests were found that didn't test the marshalling output. +- There is no enforcement that if AdditionalFields is required on one struct, it should be on all containers +that don't have custom marshal/unmarshal. + +This package aims to support our needs by providing custom Marshal()/Unmarshal() functions. + +This prevents all the negatives in the initial solution listed above. However, it does add its own negative: +- Custom encoding/decoding via reflection is messy (as can be seen in encoding/json itself) + +Go proverb: Reflection is never clear +Suggested reading: https://blog.golang.org/laws-of-reflection + +## Important design decisions + +- We don't want to understand all JSON decoding rules +- We don't want to deal with all the quoting, commas, etc on decode +- Need support for json.Marshaler/Unmarshaler, so we can support types like time.Time +- If struct does not implement json.Unmarshaler, it must have AdditionalFields defined +- We only support root level objects that are \*struct or struct + +To faciliate these goals, we will utilize the json.Encoder and json.Decoder. +They provide streaming processing (efficient) and return errors on bad JSON. + +Support for json.Marshaler/Unmarshaler allows for us to use non-basic types +that must be specially encoded/decoded (like time.Time objects). 
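As a concrete illustration of the requirement above, a message struct in this scheme pairs tagged fields for the keys it models with an untagged AdditionalFields bag for everything else, the same shape used by the cache items elsewhere in this change. The Device type here is purely illustrative.

```go
package messages

// Device is an illustrative JSON message in the style this package expects: known keys
// map to tagged fields, and any other key the service sends is preserved in
// AdditionalFields by the custom Marshal/Unmarshal in apps/internal/json, so it is not
// lost when the message is re-encoded.
type Device struct {
	ID   string `json:"id,omitempty"`
	Name string `json:"name,omitempty"`

	AdditionalFields map[string]interface{}
}
```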
+ +We don't support types that can't custom unmarshal or have AdditionalFields +in order to prevent future devs from forgetting that important field and +generating bad return values. + +Support for root level objects of \*struct or struct simply acknowledges the +fact that this is designed only for the purposes listed in the Introduction. +Anything outside that (like encoding a lone number) should be done with the +regular json package (as it will not have additional fields). + +We don't support a few things on json supported reference types and structs: +- \*map: no need for pointers to maps +- \*slice: no need for pointers to slices +- any further pointers on struct after \*struct + +There should never be a need for this in Go. + +## Design + +## State Machines + +This uses state machine designs that are based upon the Rob Pike talk on +lexers and parsers: https://www.youtube.com/watch?v=HxaD_trXwRE + +This is the most common pattern for state machines in Go and +the model to follow closely when dealing with streaming +processing of textual data. + +Our state machines are based on the type: +```go +type stateFn func() (stateFn, error) +``` + +The state machine itself is simply a struct that has methods that +satisfy stateFn. + +Our state machines have a few standard calls: +- run(): runs the state machine +- start(): always the first stateFn to be called + +All state machines have the following logic: +* run() is called +* start() is called and returns the next stateFn or an error +* stateFn is called + - If the returned stateFn (the next state) is non-nil, call it + - If the error is non-nil, run() returns the error + - If stateFn == nil and err == nil, run() returns err == nil + +## Supporting types + +Marshalling/Unmarshalling must support (within the top level struct): +- struct +- \*struct +- []struct +- []\*struct +- []map[string]structContainer +- [][]structContainer + +**Term note:** structContainer == type that has a struct or \*struct inside it + +We specifically do not support []interface or map[string]interface +where the interface value would hold some value with a struct in it. + +Those will still marshal/unmarshal, but without support for +AdditionalFields. + +## Marshalling + +The marshalling design is based around a state machine design. + +The basic logic is as follows: + +* If the struct has a custom marshaller, call it and return +* If the struct has the field "AdditionalFields", it must be a map[string]interface{} +* If the struct does not have "AdditionalFields", return an error +* Get the struct tags detailing json names to Go names, create the mapping +* For each public field name + - Write the field name out + - If the field value is a struct, recursively call our state machine + - Otherwise, use the json.Encoder to write out the value + +## Unmarshalling + +The unmarshalling design is also based around a state machine design.
The +basic logic is as follows: + +* If struct has custom marhaller, call it +* If struct has field "AdditionalFields", it must be a map[string]interface{} +* Get struct tag detailing json names to go names, create mapping +* For each key found + - If key exists, + - If value is basic type, extract value into struct field using Decoder + - If value is struct type, recursively call statemachine + - If key doesn't exist, add it to AdditionalFields if it exists using Decoder diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go new file mode 100644 index 00000000000..2238521f5f9 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go @@ -0,0 +1,184 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package json provide functions for marshalling an unmarshalling types to JSON. These functions are meant to +// be utilized inside of structs that implement json.Unmarshaler and json.Marshaler interfaces. +// This package provides the additional functionality of writing fields that are not in the struct when marshalling +// to a field called AdditionalFields if that field exists and is a map[string]interface{}. +// When marshalling, if the struct has all the same prerequisites, it will uses the keys in AdditionalFields as +// extra fields. This package uses encoding/json underneath. +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strings" +) + +const addField = "AdditionalFields" +const ( + marshalJSON = "MarshalJSON" + unmarshalJSON = "UnmarshalJSON" +) + +var ( + leftBrace = []byte("{")[0] + rightBrace = []byte("}")[0] + comma = []byte(",")[0] + leftParen = []byte("[")[0] + rightParen = []byte("]")[0] +) + +var mapStrInterType = reflect.TypeOf(map[string]interface{}{}) + +// stateFn defines a state machine function. This will be used in all state +// machines in this package. +type stateFn func() (stateFn, error) + +// Marshal is used to marshal a type into its JSON representation. It +// wraps the stdlib calls in order to marshal a struct or *struct so +// that a field called "AdditionalFields" of type map[string]interface{} +// with "-" used inside struct tag `json:"-"` can be marshalled as if +// they were fields within the struct. +func Marshal(i interface{}) ([]byte, error) { + buff := bytes.Buffer{} + enc := json.NewEncoder(&buff) + enc.SetEscapeHTML(false) + enc.SetIndent("", "") + + v := reflect.ValueOf(i) + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + err := marshalStruct(v, &buff, enc) + if err != nil { + return nil, err + } + return buff.Bytes(), nil +} + +// Unmarshal unmarshals a []byte representing JSON into i, which must be a *struct. In addition, if the struct has +// a field called AdditionalFields of type map[string]interface{}, JSON data representing fields not in the struct +// will be written as key/value pairs to AdditionalFields. +func Unmarshal(b []byte, i interface{}) error { + if len(b) == 0 { + return nil + } + + jdec := json.NewDecoder(bytes.NewBuffer(b)) + jdec.UseNumber() + return unmarshalStruct(jdec, i) +} + +// MarshalRaw marshals i into a json.RawMessage. If I cannot be marshalled, +// this will panic. This is exposed to help test AdditionalField values +// which are stored as json.RawMessage. 
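+// For example (illustrative): MarshalRaw(map[string]string{"a": "b"}) yields json.RawMessage(`{"a":"b"}`).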
+func MarshalRaw(i interface{}) json.RawMessage { + b, err := json.Marshal(i) + if err != nil { + panic(err) + } + return json.RawMessage(b) +} + +// isDelim simply tests to see if a json.Token is a delimeter. +func isDelim(got json.Token) bool { + switch got.(type) { + case json.Delim: + return true + } + return false +} + +// delimIs tests got to see if it is want. +func delimIs(got json.Token, want rune) bool { + switch v := got.(type) { + case json.Delim: + if v == json.Delim(want) { + return true + } + } + return false +} + +// hasMarshalJSON will determine if the value or a pointer to this value has +// the MarshalJSON method. +func hasMarshalJSON(v reflect.Value) bool { + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Marshaler) + return ok + } + + if v.Kind() == reflect.Ptr { + v = v.Elem() + } else { + if !v.CanAddr() { + return false + } + v = v.Addr() + } + + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Marshaler) + return ok + } + return false +} + +// callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value. +// This will panic if the method is not defined. +func callMarshalJSON(v reflect.Value) ([]byte, error) { + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + marsh := v.Interface().(json.Marshaler) + return marsh.MarshalJSON() + } + + if v.Kind() == reflect.Ptr { + v = v.Elem() + } else { + if v.CanAddr() { + v = v.Addr() + } + } + + if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { + marsh := v.Interface().(json.Marshaler) + return marsh.MarshalJSON() + } + + panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface())) +} + +// hasUnmarshalJSON will determine if the value or a pointer to this value has +// the UnmarshalJSON method. +func hasUnmarshalJSON(v reflect.Value) bool { + // You can't unmarshal on a non-pointer type. + if v.Kind() != reflect.Ptr { + if !v.CanAddr() { + return false + } + v = v.Addr() + } + + if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Unmarshaler) + return ok + } + + return false +} + +// hasOmitEmpty indicates if the field has instructed us to not output +// the field if omitempty is set on the tag. tag is the string +// returned by reflect.StructField.Tag().Get(). +func hasOmitEmpty(tag string) bool { + sl := strings.Split(tag, ",") + for _, str := range sl { + if str == "omitempty" { + return true + } + } + return false +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go new file mode 100644 index 00000000000..cef442f25c8 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go @@ -0,0 +1,333 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// unmarshalMap unmarshal's a map. 
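+// Callers in this package pass a pointer to a map that has already been created with reflect.MakeMap
+// (see decoder.storeValue); any argument other than a *map panics.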
+func unmarshalMap(dec *json.Decoder, m reflect.Value) error { + if m.Kind() != reflect.Ptr || m.Elem().Kind() != reflect.Map { + panic("unmarshalMap called on non-*map value") + } + mapValueType := m.Elem().Type().Elem() + walk := mapWalk{dec: dec, m: m, valueType: mapValueType} + if err := walk.run(); err != nil { + return err + } + return nil +} + +type mapWalk struct { + dec *json.Decoder + key string + m reflect.Value + valueType reflect.Type +} + +// run runs our decoder state machine. +func (m *mapWalk) run() error { + var state = m.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (m *mapWalk) start() (stateFn, error) { + // maps can have custom unmarshaler's. + if hasUnmarshalJSON(m.m) { + err := m.dec.Decode(m.m.Interface()) + if err != nil { + return nil, err + } + return nil, nil + } + + // We only want to use this if the map value is: + // *struct/struct/map/slice + // otherwise use standard decode + t, _ := m.valueBaseType() + switch t.Kind() { + case reflect.Struct, reflect.Map, reflect.Slice: + delim, err := m.dec.Token() + if err != nil { + return nil, err + } + // This indicates the value was set to JSON null. + if delim == nil { + return nil, nil + } + if !delimIs(delim, '{') { + return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim) + } + return m.next, nil + case reflect.Ptr: + return nil, fmt.Errorf("do not support maps with values of '**type' or '*reference") + } + + // This is a basic map type, so just use Decode(). + if err := m.dec.Decode(m.m.Interface()); err != nil { + return nil, err + } + + return nil, nil +} + +func (m *mapWalk) next() (stateFn, error) { + if m.dec.More() { + key, err := m.dec.Token() + if err != nil { + return nil, err + } + m.key = key.(string) + return m.storeValue, nil + } + // No more entries, so remove final }. + _, err := m.dec.Token() + if err != nil { + return nil, err + } + return nil, nil +} + +func (m *mapWalk) storeValue() (stateFn, error) { + v := m.valueType + for { + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + continue + case reflect.Struct: + return m.storeStruct, nil + case reflect.Map: + return m.storeMap, nil + case reflect.Slice: + return m.storeSlice, nil + } + return nil, fmt.Errorf("bug: mapWalk.storeValue() called on unsupported type: %v", v.Kind()) + } +} + +func (m *mapWalk) storeStruct() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalStruct(m.dec, v.Interface()); err != nil { + return nil, err + } + + if m.valueType.Kind() == reflect.Ptr { + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + return m.next, nil + } + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +func (m *mapWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(m.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(m.dec, ptr); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + + return m.next, nil +} + +func (m *mapWalk) storeSlice() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalSlice(m.dec, v); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... 
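+// For example (with a hypothetical Foo), a map value type of *Foo yields (Foo, true) and Foo yields (Foo, false).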
+func (m *mapWalk) valueBaseType() (reflect.Type, bool) { + ptr := false + v := m.valueType + if v.Kind() == reflect.Ptr { + ptr = true + v = v.Elem() + } + return v, ptr +} + +// unmarshalSlice unmarshal's the next value, which must be a slice, into +// ptrSlice, which must be a pointer to a slice. newValue() can be use to +// create the slice. +func unmarshalSlice(dec *json.Decoder, ptrSlice reflect.Value) error { + if ptrSlice.Kind() != reflect.Ptr || ptrSlice.Elem().Kind() != reflect.Slice { + panic("unmarshalSlice called on non-*[]slice value") + } + sliceValueType := ptrSlice.Elem().Type().Elem() + walk := sliceWalk{ + dec: dec, + s: ptrSlice, + valueType: sliceValueType, + } + if err := walk.run(); err != nil { + return err + } + + return nil +} + +type sliceWalk struct { + dec *json.Decoder + s reflect.Value // *[]slice + valueType reflect.Type +} + +// run runs our decoder state machine. +func (s *sliceWalk) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceWalk) start() (stateFn, error) { + // slices can have custom unmarshaler's. + if hasUnmarshalJSON(s.s) { + err := s.dec.Decode(s.s.Interface()) + if err != nil { + return nil, err + } + return nil, nil + } + + // We only want to use this if the slice value is: + // []*struct/[]struct/[]map/[]slice + // otherwise use standard decode + t := s.valueBaseType() + + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("cannot unmarshal into a ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + delim, err := s.dec.Token() + if err != nil { + return nil, err + } + // This indicates the value was set to nil. + if delim == nil { + return nil, nil + } + if !delimIs(delim, '[') { + return nil, fmt.Errorf("Unmarshal expected opening [, received %v", delim) + } + return s.next, nil + } + + if err := s.dec.Decode(s.s.Interface()); err != nil { + return nil, err + } + return nil, nil +} + +func (s *sliceWalk) next() (stateFn, error) { + if s.dec.More() { + return s.storeValue, nil + } + // Nothing left in the slice, remove closing ] + _, err := s.dec.Token() + return nil, err +} + +func (s *sliceWalk) storeValue() (stateFn, error) { + t := s.valueBaseType() + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("do not support 'pointer to pointer' or 'pointer to reference' types") + case reflect.Struct: + return s.storeStruct, nil + case reflect.Map: + return s.storeMap, nil + case reflect.Slice: + return s.storeSlice, nil + } + return nil, fmt.Errorf("bug: sliceWalk.storeValue() called on unsupported type: %v", t.Kind()) +} + +func (s *sliceWalk) storeStruct() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalStruct(s.dec, v.Interface()); err != nil { + return nil, err + } + + if s.valueType.Kind() == reflect.Ptr { + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + return s.next, nil + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + return s.next, nil +} + +func (s *sliceWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(s.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + + if err := unmarshalMap(s.dec, ptr); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + + return s.next, nil +} + +func (s *sliceWalk) storeSlice() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalSlice(s.dec, v); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + + 
return s.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... +func (s *sliceWalk) valueBaseType() reflect.Type { + v := s.valueType + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + return v +} + +// newValue() returns a new *type that represents type passed. +func newValue(valueType reflect.Type) reflect.Value { + if valueType.Kind() == reflect.Ptr { + return reflect.New(valueType.Elem()) + } + return reflect.New(valueType) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go new file mode 100644 index 00000000000..df5dc6e11b5 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go @@ -0,0 +1,346 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "unicode" +) + +// marshalStruct takes in i, which must be a *struct or struct and marshals its content +// as JSON into buff (sometimes with writes to buff directly, sometimes via enc). +// This call is recursive for all fields of *struct or struct type. +func marshalStruct(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + // We only care about custom Marshalling a struct. + if v.Kind() != reflect.Struct { + return fmt.Errorf("bug: marshal() received a non *struct or struct, received type %T", v.Interface()) + } + + if hasMarshalJSON(v) { + b, err := callMarshalJSON(v) + if err != nil { + return err + } + buff.Write(b) + return nil + } + + t := v.Type() + + // If it has an AdditionalFields field make sure its the right type. + f := v.FieldByName(addField) + if f.Kind() != reflect.Invalid { + if f.Kind() != reflect.Map { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + if !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + } + + translator, err := findFields(v) + if err != nil { + return err + } + + buff.WriteByte(leftBrace) + for x := 0; x < v.NumField(); x++ { + field := v.Field(x) + + // We don't access private fields. + if unicode.IsLower(rune(t.Field(x).Name[0])) { + continue + } + + if t.Field(x).Name == addField { + if v.Field(x).Len() > 0 { + if err := writeAddFields(field.Interface(), buff, enc); err != nil { + return err + } + buff.WriteByte(comma) + } + continue + } + + // If they have omitempty set, we don't write out the field if + // it is the zero value. + if hasOmitEmpty(t.Field(x).Tag.Get("json")) { + if v.Field(x).IsZero() { + continue + } + } + + // Write out the field name part. + jsonName := translator.jsonName(t.Field(x).Name) + buff.WriteString(fmt.Sprintf("%q:", jsonName)) + + if field.Kind() == reflect.Ptr { + field = field.Elem() + } + + if err := marshalStructField(field, buff, enc); err != nil { + return err + } + } + + buff.Truncate(buff.Len() - 1) // Remove final comma + buff.WriteByte(rightBrace) + + return nil +} + +func marshalStructField(field reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + // Determine if we need a trailing comma. + defer buff.WriteByte(comma) + + switch field.Kind() { + // If it was a *struct or struct, we need to recursively all marshal(). 
+ case reflect.Struct: + if field.CanAddr() { + field = field.Addr() + } + return marshalStruct(field, buff, enc) + case reflect.Map: + return marshalMap(field, buff, enc) + case reflect.Slice: + return marshalSlice(field, buff, enc) + } + + // It is just a basic type, so encode it. + if err := enc.Encode(field.Interface()); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + return nil +} + +func marshalMap(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Map { + return fmt.Errorf("bug: marshalMap() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftBrace) + buff.WriteByte(rightBrace) + return nil + } + encoder := mapEncode{m: v, buff: buff, enc: enc} + return encoder.run() +} + +type mapEncode struct { + m reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. +func (m *mapEncode) run() error { + var state = m.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (m *mapEncode) start() (stateFn, error) { + if hasMarshalJSON(m.m) { + b, err := callMarshalJSON(m.m) + if err != nil { + return nil, err + } + m.buff.Write(b) + return nil, nil + } + + valueBaseType := m.m.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + m.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return m.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. + if err := m.enc.Encode(m.m.Interface()); err != nil { + return nil, err + } + m.buff.Truncate(m.buff.Len() - 1) // Remove Encode() added \n + return nil, nil +} + +func (m *mapEncode) encode() (stateFn, error) { + m.buff.WriteByte(leftBrace) + + iter := m.m.MapRange() + for iter.Next() { + // Write the key. + k := iter.Key() + m.buff.WriteString(fmt.Sprintf("%q:", k.String())) + + v := iter.Value() + switch m.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, m.buff, m.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", m.valueBaseType.Kind())) + } + m.buff.WriteByte(comma) + } + m.buff.Truncate(m.buff.Len() - 1) // Remove final comma + m.buff.WriteByte(rightBrace) + + return nil, nil +} + +func marshalSlice(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Slice { + return fmt.Errorf("bug: marshalSlice() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftParen) + buff.WriteByte(rightParen) + return nil + } + encoder := sliceEncode{s: v, buff: buff, enc: enc} + return encoder.run() +} + +type sliceEncode struct { + s reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. 
+func (s *sliceEncode) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceEncode) start() (stateFn, error) { + if hasMarshalJSON(s.s) { + b, err := callMarshalJSON(s.s) + if err != nil { + return nil, err + } + s.buff.Write(b) + return nil, nil + } + + valueBaseType := s.s.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + s.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return s.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. + if err := s.enc.Encode(s.s.Interface()); err != nil { + return nil, err + } + s.buff.Truncate(s.buff.Len() - 1) // Remove Encode added \n + + return nil, nil +} + +func (s *sliceEncode) encode() (stateFn, error) { + s.buff.WriteByte(leftParen) + for i := 0; i < s.s.Len(); i++ { + v := s.s.Index(i) + switch s.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, s.buff, s.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", s.valueBaseType.Kind())) + } + s.buff.WriteByte(comma) + } + s.buff.Truncate(s.buff.Len() - 1) // Remove final comma + s.buff.WriteByte(rightParen) + return nil, nil +} + +// writeAddFields writes the AdditionalFields struct field out to JSON as field +// values. i must be a map[string]interface{} or this will panic. +func writeAddFields(i interface{}, buff *bytes.Buffer, enc *json.Encoder) error { + m := i.(map[string]interface{}) + + x := 0 + for k, v := range m { + buff.WriteString(fmt.Sprintf("%q:", k)) + if err := enc.Encode(v); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + if x+1 != len(m) { + buff.WriteByte(comma) + } + x++ + } + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go new file mode 100644 index 00000000000..07751544a28 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go @@ -0,0 +1,290 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +func unmarshalStruct(jdec *json.Decoder, i interface{}) error { + v := reflect.ValueOf(i) + if v.Kind() != reflect.Ptr { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + + if hasUnmarshalJSON(v) { + // Indicates that this type has a custom Unmarshaler. 
+ return jdec.Decode(v.Addr().Interface()) + } + + f := v.FieldByName(addField) + if f.Kind() == reflect.Invalid { + return fmt.Errorf("Unmarshal(%T) only supports structs that have the field AdditionalFields or implements json.Unmarshaler", i) + } + + if f.Kind() != reflect.Map || !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", i) + } + + dec := newDecoder(jdec, v) + return dec.run() +} + +type decoder struct { + dec *json.Decoder + value reflect.Value // This will be a reflect.Struct + translator translateFields + key string +} + +func newDecoder(dec *json.Decoder, value reflect.Value) *decoder { + return &decoder{value: value, dec: dec} +} + +// run runs our decoder state machine. +func (d *decoder) run() error { + var state = d.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +// start looks for our opening delimeter '{' and then transitions to looping through our fields. +func (d *decoder) start() (stateFn, error) { + var err error + d.translator, err = findFields(d.value) + if err != nil { + return nil, err + } + + delim, err := d.dec.Token() + if err != nil { + return nil, err + } + if !delimIs(delim, '{') { + return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim) + } + + return d.next, nil +} + +// next gets the next struct field name from the raw json or stops the machine if we get our closing }. +func (d *decoder) next() (stateFn, error) { + if !d.dec.More() { + // Remove the closing }. + if _, err := d.dec.Token(); err != nil { + return nil, err + } + return nil, nil + } + + key, err := d.dec.Token() + if err != nil { + return nil, err + } + + d.key = key.(string) + return d.storeValue, nil +} + +// storeValue takes the next value and stores it our struct. If the field can't be found +// in the struct, it pushes the operation to storeAdditional(). +func (d *decoder) storeValue() (stateFn, error) { + goName := d.translator.goName(d.key) + if goName == "" { + goName = d.key + } + + // We don't have the field in the struct, so it goes in AdditionalFields. + f := d.value.FieldByName(goName) + if f.Kind() == reflect.Invalid { + return d.storeAdditional, nil + } + + // Indicates that this type has a custom Unmarshaler. + if hasUnmarshalJSON(f) { + err := d.dec.Decode(f.Addr().Interface()) + if err != nil { + return nil, err + } + return d.next, nil + } + + t, isPtr, err := fieldBaseType(d.value, goName) + if err != nil { + return nil, fmt.Errorf("type(%s) had field(%s) %w", d.value.Type().Name(), goName, err) + } + + switch t.Kind() { + // We need to recursively call ourselves on any *struct or struct. + case reflect.Struct: + if isPtr { + if f.IsNil() { + f.Set(reflect.New(t)) + } + } else { + f = f.Addr() + } + if err := unmarshalStruct(d.dec, f.Interface()); err != nil { + return nil, err + } + return d.next, nil + case reflect.Map: + v := reflect.MakeMap(f.Type()) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + case reflect.Slice: + v := reflect.MakeSlice(f.Type(), 0, 0) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalSlice(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + } + + if !isPtr { + f = f.Addr() + } + + // For values that are pointers, we need them to be non-nil in order + // to decode into them. 
+ if f.IsNil() { + f.Set(reflect.New(t)) + } + + if err := d.dec.Decode(f.Interface()); err != nil { + return nil, err + } + + return d.next, nil +} + +// storeAdditional pushes the key/value into our .AdditionalFields map. +func (d *decoder) storeAdditional() (stateFn, error) { + rw := json.RawMessage{} + if err := d.dec.Decode(&rw); err != nil { + return nil, err + } + field := d.value.FieldByName(addField) + if field.IsNil() { + field.Set(reflect.MakeMap(field.Type())) + } + field.SetMapIndex(reflect.ValueOf(d.key), reflect.ValueOf(rw)) + return d.next, nil +} + +func fieldBaseType(v reflect.Value, fieldName string) (t reflect.Type, isPtr bool, err error) { + sf, ok := v.Type().FieldByName(fieldName) + if !ok { + return nil, false, fmt.Errorf("bug: fieldBaseType() lookup of field(%s) on type(%s): do not have field", fieldName, v.Type().Name()) + } + t = sf.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + isPtr = true + } + if t.Kind() == reflect.Ptr { + return nil, isPtr, fmt.Errorf("received pointer to pointer type, not supported") + } + return t, isPtr, nil +} + +type translateField struct { + jsonName string + goName string +} + +// translateFields is a list of translateFields with a handy lookup method. +type translateFields []translateField + +// goName loops through a list of fields looking for one contaning the jsonName and +// returning the goName. If not found, returns the empty string. +// Note: not a map because at this size slices are faster even in tight loops. +func (t translateFields) goName(jsonName string) string { + for _, entry := range t { + if entry.jsonName == jsonName { + return entry.goName + } + } + return "" +} + +// jsonName loops through a list of fields looking for one contaning the goName and +// returning the jsonName. If not found, returns the empty string. +// Note: not a map because at this size slices are faster even in tight loops. +func (t translateFields) jsonName(goName string) string { + for _, entry := range t { + if entry.goName == goName { + return entry.jsonName + } + } + return "" +} + +var umarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + +// findFields parses a struct and writes the field tags for lookup. It will return an error +// if any field has a type of *struct or struct that does not implement json.Marshaler. +func findFields(v reflect.Value) (translateFields, error) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("findFields received a %s type, expected *struct or struct", v.Type().Name()) + } + tfs := make([]translateField, 0, v.NumField()) + for i := 0; i < v.NumField(); i++ { + tf := translateField{ + goName: v.Type().Field(i).Name, + jsonName: parseTag(v.Type().Field(i).Tag.Get("json")), + } + switch tf.jsonName { + case "", "-": + tf.jsonName = tf.goName + } + tfs = append(tfs, tf) + + f := v.Field(i) + if f.Kind() == reflect.Ptr { + f = f.Elem() + } + if f.Kind() == reflect.Struct { + if f.Type().Implements(umarshalerType) { + return nil, fmt.Errorf("struct type %q which has field %q which "+ + "doesn't implement json.Unmarshaler", v.Type().Name(), v.Type().Field(i).Name) + } + } + } + return tfs, nil +} + +// parseTag just returns the first entry in the tag. tag is the string +// returned by reflect.StructField.Tag().Get(). 
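+// For example, parseTag("name,omitempty") returns "name" and parseTag("") returns "".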
+func parseTag(tag string) string { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx] + } + return tag +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go new file mode 100644 index 00000000000..a1c99621e9f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package time provides for custom types to translate time from JSON and other formats +// into time.Time objects. +package time + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Unix provides a type that can marshal and unmarshal a string representation +// of the unix epoch into a time.Time object. +type Unix struct { + T time.Time +} + +// MarshalJSON implements encoding/json.MarshalJSON(). +func (u Unix) MarshalJSON() ([]byte, error) { + if u.T.IsZero() { + return []byte(""), nil + } + return []byte(fmt.Sprintf("%q", strconv.FormatInt(u.T.Unix(), 10))), nil +} + +// UnmarshalJSON implements encoding/json.UnmarshalJSON(). +func (u *Unix) UnmarshalJSON(b []byte) error { + i, err := strconv.Atoi(strings.Trim(string(b), `"`)) + if err != nil { + return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err) + } + u.T = time.Unix(int64(i), 0) + return nil +} + +// DurationTime provides a type that can marshal and unmarshal a string representation +// of a duration from now into a time.Time object. +// Note: I'm not sure this is the best way to do this. What happens is we get a field +// called "expires_in" that represents the seconds from now that this expires. We +// turn that into a time we call .ExpiresOn. But maybe we should be recording +// when the token was received at .TokenRecieved and .ExpiresIn should remain as a duration. +// Then we could have a method called ExpiresOn(). Honestly, the whole thing is +// bad because the server doesn't return a concrete time. I think this is +// cleaner, but its not great either. +type DurationTime struct { + T time.Time +} + +// MarshalJSON implements encoding/json.MarshalJSON(). +func (d DurationTime) MarshalJSON() ([]byte, error) { + if d.T.IsZero() { + return []byte(""), nil + } + + dt := time.Until(d.T) + return []byte(fmt.Sprintf("%d", int64(dt*time.Second))), nil +} + +// UnmarshalJSON implements encoding/json.UnmarshalJSON(). +func (d *DurationTime) UnmarshalJSON(b []byte) error { + i, err := strconv.Atoi(strings.Trim(string(b), `"`)) + if err != nil { + return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err) + } + d.T = time.Now().Add(time.Duration(i) * time.Second) + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go new file mode 100644 index 00000000000..04236ff3127 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -0,0 +1,177 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package local contains a local HTTP server used with interactive authentication. 
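+// A rough usage sketch (illustrative, not code in this file): create the server with New(state, 0), send the
+// browser to the authorization URL with redirect_uri set to srv.Addr, block on srv.Result(ctx) for the
+// authorization code (or error), then call srv.Shutdown().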
+package local
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var okPage = []byte(`
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8" />
+    <title>Authentication Complete</title>
+</head>
+<body>
+    <p>Authentication complete. You can return to the application. Feel free to close this browser tab.</p>
+</body>
+</html>
+`)
+
+const failPage = `
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8" />
+    <title>Authentication Failed</title>
+</head>
+<body>
+    <p>Authentication failed. You can return to the application. Feel free to close this browser tab.</p>
+    <p>Error details: error %s error_description: %s</p>
      + + +` + +// Result is the result from the redirect. +type Result struct { + // Code is the code sent by the authority server. + Code string + // Err is set if there was an error. + Err error +} + +// Server is an HTTP server. +type Server struct { + // Addr is the address the server is listening on. + Addr string + resultCh chan Result + s *http.Server + reqState string +} + +// New creates a local HTTP server and starts it. +func New(reqState string, port int) (*Server, error) { + var l net.Listener + var err error + var portStr string + if port > 0 { + // use port provided by caller + l, err = net.Listen("tcp", fmt.Sprintf("localhost:%d", port)) + portStr = strconv.FormatInt(int64(port), 10) + } else { + // find a free port + for i := 0; i < 10; i++ { + l, err = net.Listen("tcp", "localhost:0") + if err != nil { + continue + } + addr := l.Addr().String() + portStr = addr[strings.LastIndex(addr, ":")+1:] + break + } + } + if err != nil { + return nil, err + } + + serv := &Server{ + Addr: fmt.Sprintf("http://localhost:%s", portStr), + s: &http.Server{Addr: "localhost:0", ReadHeaderTimeout: time.Second}, + reqState: reqState, + resultCh: make(chan Result, 1), + } + serv.s.Handler = http.HandlerFunc(serv.handler) + + if err := serv.start(l); err != nil { + return nil, err + } + + return serv, nil +} + +func (s *Server) start(l net.Listener) error { + go func() { + err := s.s.Serve(l) + if err != nil { + select { + case s.resultCh <- Result{Err: err}: + default: + } + } + }() + + return nil +} + +// Result gets the result of the redirect operation. Once a single result is returned, the server +// is shutdown. ctx deadline will be honored. +func (s *Server) Result(ctx context.Context) Result { + select { + case <-ctx.Done(): + return Result{Err: ctx.Err()} + case r := <-s.resultCh: + return r + } +} + +// Shutdown shuts down the server. +func (s *Server) Shutdown() { + // Note: You might get clever and think you can do this in handler() as a defer, you can't. + _ = s.s.Shutdown(context.Background()) +} + +func (s *Server) putResult(r Result) { + select { + case s.resultCh <- r: + default: + } +} + +func (s *Server) handler(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query() + + headerErr := q.Get("error") + if headerErr != "" { + desc := q.Get("error_description") + // Note: It is a little weird we handle some errors by not going to the failPage. If they all should, + // change this to s.error() and make s.error() write the failPage instead of an error code. + _, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc))) + s.putResult(Result{Err: fmt.Errorf(desc)}) + return + } + + respState := q.Get("state") + switch respState { + case s.reqState: + case "": + s.error(w, http.StatusInternalServerError, "server didn't send OAuth state") + return + default: + s.error(w, http.StatusInternalServerError, "mismatched OAuth state, req(%s), resp(%s)", s.reqState, respState) + return + } + + code := q.Get("code") + if code == "" { + s.error(w, http.StatusInternalServerError, "authorization code missing in query string") + return + } + + _, _ = w.Write(okPage) + s.putResult(Result{Code: code}) +} + +func (s *Server) error(w http.ResponseWriter, code int, str string, i ...interface{}) { + err := fmt.Errorf(str, i...) 
+ http.Error(w, err.Error(), code) + s.putResult(Result{Err: err}) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go new file mode 100644 index 00000000000..ebd86e2baf9 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -0,0 +1,353 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package oauth + +import ( + "context" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" + "github.com/google/uuid" +) + +// ResolveEndpointer contains the methods for resolving authority endpoints. +type ResolveEndpointer interface { + ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) +} + +// AccessTokens contains the methods for fetching tokens from different sources. +type AccessTokens interface { + DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (accesstokens.DeviceCodeResult, error) + FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (accesstokens.TokenResponse, error) + FromAuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) + FromRefreshToken(ctx context.Context, appType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken string) (accesstokens.TokenResponse, error) + FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (accesstokens.TokenResponse, error) + FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (accesstokens.TokenResponse, error) + FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (accesstokens.TokenResponse, error) + FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (accesstokens.TokenResponse, error) + FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult accesstokens.DeviceCodeResult) (accesstokens.TokenResponse, error) + FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (accesstokens.TokenResponse, error) +} + +// FetchAuthority will be implemented by authority.Authority. 
+type FetchAuthority interface { + UserRealm(context.Context, authority.AuthParams) (authority.UserRealm, error) + AADInstanceDiscovery(context.Context, authority.Info) (authority.InstanceDiscoveryResponse, error) +} + +// FetchWSTrust contains the methods for interacting with WSTrust endpoints. +type FetchWSTrust interface { + Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) + SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (wstrust.SamlTokenInfo, error) +} + +// Client provides tokens for various types of token requests. +type Client struct { + Resolver ResolveEndpointer + AccessTokens AccessTokens + Authority FetchAuthority + WSTrust FetchWSTrust +} + +// New is the constructor for Token. +func New(httpClient ops.HTTPClient) *Client { + r := ops.New(httpClient) + return &Client{ + Resolver: newAuthorityEndpoint(r), + AccessTokens: r.AccessTokens(), + Authority: r.Authority(), + WSTrust: r.WSTrust(), + } +} + +// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance. +func (t *Client) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) { + return t.Resolver.ResolveEndpoints(ctx, authorityInfo, userPrincipalName) +} + +// AADInstanceDiscovery attempts to discover a tenant endpoint (used in OIDC auth with an authorization endpoint). +// This is done by AAD which allows for aliasing of tenants (windows.sts.net is the same as login.windows.com). +func (t *Client) AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) { + return t.Authority.AADInstanceDiscovery(ctx, authorityInfo) +} + +// AuthCode returns a token based on an authorization code. +func (t *Client) AuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) { + if err := scopeError(req.AuthParams); err != nil { + return accesstokens.TokenResponse{}, err + } + if err := t.resolveEndpoint(ctx, &req.AuthParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + tResp, err := t.AccessTokens.FromAuthCode(ctx, req) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("could not retrieve token from auth code: %w", err) + } + return tResp, nil +} + +// Credential acquires a token from the authority using a client credentials grant. 
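+// Resolution order in the implementation below: a caller-supplied cred.TokenProvider is called first; otherwise
+// the endpoints are resolved and cred.Secret selects FromClientSecret, else a client assertion built by cred.JWT()
+// is sent via FromAssertion.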
+func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) { + if cred.TokenProvider != nil { + now := time.Now() + scopes := make([]string, len(authParams.Scopes)) + copy(scopes, authParams.Scopes) + params := exported.TokenProviderParameters{ + Claims: authParams.Claims, + CorrelationID: uuid.New().String(), + Scopes: scopes, + TenantID: authParams.AuthorityInfo.Tenant, + } + tr, err := cred.TokenProvider(ctx, params) + if err != nil { + if len(scopes) == 0 { + err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err) + return accesstokens.TokenResponse{}, err + } + return accesstokens.TokenResponse{}, err + } + return accesstokens.TokenResponse{ + AccessToken: tr.AccessToken, + ExpiresOn: internalTime.DurationTime{ + T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second), + }, + GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes}, + }, nil + } + + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + if cred.Secret != "" { + return t.AccessTokens.FromClientSecret(ctx, authParams, cred.Secret) + } + jwt, err := cred.JWT(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromAssertion(ctx, authParams, jwt) +} + +// Credential acquires a token from the authority using a client credentials grant. +func (t *Client) OnBehalfOf(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) { + if err := scopeError(authParams); err != nil { + return accesstokens.TokenResponse{}, err + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + if cred.Secret != "" { + return t.AccessTokens.FromUserAssertionClientSecret(ctx, authParams, authParams.UserAssertion, cred.Secret) + } + jwt, err := cred.JWT(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, err + } + tr, err := t.AccessTokens.FromUserAssertionClientCertificate(ctx, authParams, authParams.UserAssertion, jwt) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return tr, nil +} + +func (t *Client) Refresh(ctx context.Context, reqType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken accesstokens.RefreshToken) (accesstokens.TokenResponse, error) { + if err := scopeError(authParams); err != nil { + return accesstokens.TokenResponse{}, err + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + tr, err := t.AccessTokens.FromRefreshToken(ctx, reqType, authParams, cc, refreshToken.Secret) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return tr, nil +} + +// UsernamePassword retrieves a token where a username and password is used. However, if this is +// a user realm of "Federated", this uses SAML tokens. If "Managed", uses normal username/password. 
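+// Per the implementation below: ADFS authorities resolve endpoints with the username and call FromUsernamePassword
+// directly; otherwise the user realm is fetched and Federated accounts go through WS-Trust (Mex document plus SAML
+// grant) while Managed accounts use FromUsernamePassword.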
+func (t *Client) UsernamePassword(ctx context.Context, authParams authority.AuthParams) (accesstokens.TokenResponse, error) { + if err := scopeError(authParams); err != nil { + return accesstokens.TokenResponse{}, err + } + + if authParams.AuthorityInfo.AuthorityType == authority.ADFS { + if err := t.resolveEndpoint(ctx, &authParams, authParams.Username); err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + userRealm, err := t.Authority.UserRealm(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("problem getting user realm from authority: %w", err) + } + + switch userRealm.AccountType { + case authority.Federated: + mexDoc, err := t.WSTrust.Mex(ctx, userRealm.FederationMetadataURL) + if err != nil { + err = fmt.Errorf("problem getting mex doc from federated url(%s): %w", userRealm.FederationMetadataURL, err) + return accesstokens.TokenResponse{}, err + } + + saml, err := t.WSTrust.SAMLTokenInfo(ctx, authParams, userRealm.CloudAudienceURN, mexDoc.UsernamePasswordEndpoint) + if err != nil { + err = fmt.Errorf("problem getting SAML token info: %w", err) + return accesstokens.TokenResponse{}, err + } + tr, err := t.AccessTokens.FromSamlGrant(ctx, authParams, saml) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return tr, nil + case authority.Managed: + if len(authParams.Scopes) == 0 { + err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err) + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + return accesstokens.TokenResponse{}, errors.New("unknown account type") +} + +// DeviceCode is the result of a call to Token.DeviceCode(). +type DeviceCode struct { + // Result is the device code result from the first call in the device code flow. This allows + // the caller to retrieve the displayed code that is used to authorize on the second device. + Result accesstokens.DeviceCodeResult + authParams authority.AuthParams + + accessTokens AccessTokens +} + +// Token returns a token AFTER the user uses the user code on the second device. This will block +// until either: (1) the code is input by the user and the service releases a token, (2) the token +// expires, (3) the Context passed to .DeviceCode() is cancelled or expires, (4) some other service +// error occurs. +func (d DeviceCode) Token(ctx context.Context) (accesstokens.TokenResponse, error) { + if d.accessTokens == nil { + return accesstokens.TokenResponse{}, fmt.Errorf("DeviceCode was either created outside its package or the creating method had an error. 
DeviceCode is not valid") + } + + var cancel context.CancelFunc + if deadline, ok := ctx.Deadline(); !ok || d.Result.ExpiresOn.Before(deadline) { + ctx, cancel = context.WithDeadline(ctx, d.Result.ExpiresOn) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + + var interval = 50 * time.Millisecond + timer := time.NewTimer(interval) + defer timer.Stop() + + for { + timer.Reset(interval) + select { + case <-ctx.Done(): + return accesstokens.TokenResponse{}, ctx.Err() + case <-timer.C: + interval += interval * 2 + if interval > 5*time.Second { + interval = 5 * time.Second + } + } + + token, err := d.accessTokens.FromDeviceCodeResult(ctx, d.authParams, d.Result) + if err != nil && isWaitDeviceCodeErr(err) { + continue + } + return token, err // This handles if it was a non-wait error or success + } +} + +type deviceCodeError struct { + Error string `json:"error"` +} + +func isWaitDeviceCodeErr(err error) bool { + var c errors.CallErr + if !errors.As(err, &c) { + return false + } + if c.Resp.StatusCode != 400 { + return false + } + var dCErr deviceCodeError + defer c.Resp.Body.Close() + body, err := io.ReadAll(c.Resp.Body) + if err != nil { + return false + } + err = json.Unmarshal(body, &dCErr) + if err != nil { + return false + } + if dCErr.Error == "authorization_pending" || dCErr.Error == "slow_down" { + return true + } + return false +} + +// DeviceCode returns a DeviceCode object that can be used to get the code that must be entered on the second +// device and optionally the token once the code has been entered on the second device. +func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams) (DeviceCode, error) { + if err := scopeError(authParams); err != nil { + return DeviceCode{}, err + } + + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return DeviceCode{}, err + } + + dcr, err := t.AccessTokens.DeviceCodeResult(ctx, authParams) + if err != nil { + return DeviceCode{}, err + } + + return DeviceCode{Result: dcr, authParams: authParams, accessTokens: t.AccessTokens}, nil +} + +func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error { + endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName) + if err != nil { + return fmt.Errorf("unable to resolve an endpoint: %s", err) + } + authParams.Endpoints = endpoints + return nil +} + +// scopeError takes an authority.AuthParams and returns an error +// if len(AuthParams.Scope) == 0. +func scopeError(a authority.AuthParams) error { + // TODO(someone): we could look deeper at the message to determine if + // it's a scope error, but this is a good start. + /* + {error":"invalid_scope","error_description":"AADSTS1002012: The provided value for scope + openid offline_access profile is not valid. 
Client credential flows must have a scope value + with /.default suffixed to the resource identifier (application ID URI)...} + */ + if len(a.Scopes) == 0 { + return fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which is invalid") + } + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go new file mode 100644 index 00000000000..fa6bb61c8ef --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go @@ -0,0 +1,451 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package accesstokens exposes a REST client for querying backend systems to get various types of +access tokens (oauth) for use in authentication. + +These calls are of type "application/x-www-form-urlencoded". This means we use url.Values to +represent arguments and then encode them into the POST body message. We receive JSON in +return for the requests. The request definition is defined in https://tools.ietf.org/html/rfc7521#section-4.2 . +*/ +package accesstokens + +import ( + "context" + "crypto" + + /* #nosec */ + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" + "github.com/golang-jwt/jwt/v4" + "github.com/google/uuid" +) + +const ( + grantType = "grant_type" + deviceCode = "device_code" + clientID = "client_id" + clientInfo = "client_info" + clientInfoVal = "1" + username = "username" + password = "password" +) + +//go:generate stringer -type=AppType + +// AppType is whether the authorization code flow is for a public or confidential client. +type AppType int8 + +const ( + // ATUnknown is the zero value when the type hasn't been set. + ATUnknown AppType = iota + // ATPublic indicates this if for the Public.Client. + ATPublic + // ATConfidential indicates this if for the Confidential.Client. + ATConfidential +) + +type urlFormCaller interface { + URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error +} + +// DeviceCodeResponse represents the HTTP response received from the device code endpoint +type DeviceCodeResponse struct { + authority.OAuthResponseBase + + UserCode string `json:"user_code"` + DeviceCode string `json:"device_code"` + VerificationURL string `json:"verification_url"` + ExpiresIn int `json:"expires_in"` + Interval int `json:"interval"` + Message string `json:"message"` + + AdditionalFields map[string]interface{} +} + +// Convert converts the DeviceCodeResponse to a DeviceCodeResult +func (dcr DeviceCodeResponse) Convert(clientID string, scopes []string) DeviceCodeResult { + expiresOn := time.Now().UTC().Add(time.Duration(dcr.ExpiresIn) * time.Second) + return NewDeviceCodeResult(dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, expiresOn, dcr.Interval, dcr.Message, clientID, scopes) +} + +// Credential represents the credential used in confidential client flows. 
This can be either +// a Secret or Cert/Key. +type Credential struct { + // Secret contains the credential secret if we are doing auth by secret. + Secret string + + // Cert is the public certificate, if we're authenticating by certificate. + Cert *x509.Certificate + // Key is the private key for signing, if we're authenticating by certificate. + Key crypto.PrivateKey + // X5c is the JWT assertion's x5c header value, required for SN/I authentication. + X5c []string + + // AssertionCallback is a function provided by the application, if we're authenticating by assertion. + AssertionCallback func(context.Context, exported.AssertionRequestOptions) (string, error) + + // TokenProvider is a function provided by the application that implements custom authentication + // logic for a confidential client + TokenProvider func(context.Context, exported.TokenProviderParameters) (exported.TokenProviderResult, error) +} + +// JWT gets the jwt assertion when the credential is not using a secret. +func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (string, error) { + if c.AssertionCallback != nil { + options := exported.AssertionRequestOptions{ + ClientID: authParams.ClientID, + TokenEndpoint: authParams.Endpoints.TokenEndpoint, + } + return c.AssertionCallback(ctx, options) + } + + token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "aud": authParams.Endpoints.TokenEndpoint, + "exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)), + "iss": authParams.ClientID, + "jti": uuid.New().String(), + "nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)), + "sub": authParams.ClientID, + }) + token.Header = map[string]interface{}{ + "alg": "RS256", + "typ": "JWT", + "x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)), + } + + if authParams.SendX5C { + token.Header["x5c"] = c.X5c + } + + assertion, err := token.SignedString(c.Key) + if err != nil { + return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err) + } + return assertion, nil +} + +// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT. +// https://tools.ietf.org/html/rfc7517#section-4.8 +func thumbprint(cert *x509.Certificate) []byte { + /* #nosec */ + a := sha1.Sum(cert.Raw) + return a[:] +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm urlFormCaller + + testing bool +} + +// FromUsernamePassword uses a username and password to get an access token. +func (c Client) FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.Password) + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +// AuthCodeRequest stores the values required to request a token from the authority using an authorization code +type AuthCodeRequest struct { + AuthParams authority.AuthParams + Code string + CodeChallenge string + Credential *Credential + AppType AppType +} + +// NewCodeChallengeRequest returns an AuthCodeRequest that uses a code challenge.. 
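For illustration only (not part of the vendored code): a minimal sketch of how a client-assertion JWT like the one produced by Credential.JWT above can be built with the same golang-jwt/v4 and uuid calls; the certificate, key, client ID and token endpoint are caller-supplied placeholders.

package example

import (
	"crypto/rsa"
	"crypto/sha1"
	"crypto/x509"
	"encoding/base64"
	"time"

	"github.com/golang-jwt/jwt/v4"
	"github.com/google/uuid"
)

// buildClientAssertion signs a JWT whose claims mirror those produced by Credential.JWT above.
// cert, key, clientID and tokenEndpoint are placeholders supplied by the caller.
func buildClientAssertion(cert *x509.Certificate, key *rsa.PrivateKey, clientID, tokenEndpoint string) (string, error) {
	now := time.Now()
	token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{
		"aud": tokenEndpoint,
		"iss": clientID,
		"sub": clientID,
		"jti": uuid.New().String(),
		"nbf": now.Unix(),
		"exp": now.Add(10 * time.Minute).Unix(),
	})
	// x5t is the base64 of the SHA-1 thumbprint of the DER-encoded certificate (RFC 7517 §4.8).
	thumb := sha1.Sum(cert.Raw)
	token.Header["x5t"] = base64.StdEncoding.EncodeToString(thumb[:])
	return token.SignedString(key)
}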
+func NewCodeChallengeRequest(params authority.AuthParams, appType AppType, cc *Credential, code, challenge string) (AuthCodeRequest, error) { + if appType == ATUnknown { + return AuthCodeRequest{}, fmt.Errorf("bug: NewCodeChallengeRequest() called with AppType == ATUnknown") + } + return AuthCodeRequest{ + AuthParams: params, + AppType: appType, + Code: code, + CodeChallenge: challenge, + Credential: cc, + }, nil +} + +// FromAuthCode uses an authorization code to retrieve an access token. +func (c Client) FromAuthCode(ctx context.Context, req AuthCodeRequest) (TokenResponse, error) { + var qv url.Values + + switch req.AppType { + case ATUnknown: + return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == ATUnknown") + case ATConfidential: + var err error + if req.Credential == nil { + return TokenResponse{}, fmt.Errorf("AuthCodeRequest had nil Credential for Confidential app") + } + qv, err = prepURLVals(ctx, req.Credential, req.AuthParams) + if err != nil { + return TokenResponse{}, err + } + case ATPublic: + qv = url.Values{} + default: + return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == %v, which we do not recongnize", req.AppType) + } + + qv.Set(grantType, grant.AuthCode) + qv.Set("code", req.Code) + qv.Set("code_verifier", req.CodeChallenge) + qv.Set("redirect_uri", req.AuthParams.Redirecturi) + qv.Set(clientID, req.AuthParams.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, req.AuthParams) + if err := addClaims(qv, req.AuthParams); err != nil { + return TokenResponse{}, err + } + + return c.doTokenResp(ctx, req.AuthParams, qv) +} + +// FromRefreshToken uses a refresh token (for refreshing credentials) to get a new access token. +func (c Client) FromRefreshToken(ctx context.Context, appType AppType, authParams authority.AuthParams, cc *Credential, refreshToken string) (TokenResponse, error) { + qv := url.Values{} + if appType == ATConfidential { + var err error + qv, err = prepURLVals(ctx, cc, authParams) + if err != nil { + return TokenResponse{}, err + } + } + if err := addClaims(qv, authParams); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.RefreshToken) + qv.Set(clientID, authParams.ClientID) + qv.Set(clientInfo, clientInfoVal) + qv.Set("refresh_token", refreshToken) + addScopeQueryParam(qv, authParams) + + return c.doTokenResp(ctx, authParams, qv) +} + +// FromClientSecret uses a client's secret (aka password) to get a new token. 
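As a side note (not part of the vendored library), the code_verifier/code_challenge pair that FromAuthCode forwards is normally derived per RFC 7636 with the S256 method; a minimal standard-library sketch:

package example

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
)

// newPKCE returns a random code verifier and its S256 code challenge (RFC 7636).
// The challenge is the base64url (no padding) encoding of SHA-256(verifier).
func newPKCE() (verifier, challenge string, err error) {
	buf := make([]byte, 32)
	if _, err = rand.Read(buf); err != nil {
		return "", "", err
	}
	verifier = base64.RawURLEncoding.EncodeToString(buf)
	sum := sha256.Sum256([]byte(verifier))
	challenge = base64.RawURLEncoding.EncodeToString(sum[:])
	return verifier, challenge, nil
}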
+func (c Client) FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_secret", clientSecret) + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromClientSecret(): %w", err) + } + return token, nil +} + +func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromAssertion(): %w", err) + } + return token, nil +} + +func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.JWT) + qv.Set(clientID, authParameters.ClientID) + qv.Set("client_secret", clientSecret) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.JWT) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (DeviceCodeResult, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return DeviceCodeResult{}, err + } + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + endpoint := strings.Replace(authParameters.Endpoints.TokenEndpoint, "token", "devicecode", -1) + + resp := DeviceCodeResponse{} + err := c.Comm.URLFormCall(ctx, endpoint, qv, &resp) + if err != nil { + return DeviceCodeResult{}, err + } + + return resp.Convert(authParameters.ClientID, authParameters.Scopes), nil +} + +func (c Client) FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult DeviceCodeResult) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.DeviceCode) + qv.Set(deviceCode, deviceCodeResult.DeviceCode) + qv.Set(clientID, 
authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + qv.Set("assertion", base64.StdEncoding.WithPadding(base64.StdPadding).EncodeToString([]byte(samlGrant.Assertion))) + addScopeQueryParam(qv, authParameters) + + switch samlGrant.AssertionType { + case grant.SAMLV1: + qv.Set(grantType, grant.SAMLV1) + case grant.SAMLV2: + qv.Set(grantType, grant.SAMLV2) + default: + return TokenResponse{}, fmt.Errorf("GetAccessTokenFromSamlGrant returned unknown SAML assertion type: %q", samlGrant.AssertionType) + } + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams, qv url.Values) (TokenResponse, error) { + resp := TokenResponse{} + err := c.Comm.URLFormCall(ctx, authParams.Endpoints.TokenEndpoint, qv, &resp) + if err != nil { + return resp, err + } + resp.ComputeScope(authParams) + if c.testing { + return resp, nil + } + return resp, resp.Validate() +} + +// prepURLVals returns an url.Values that sets various key/values if we are doing secrets +// or JWT assertions. +func prepURLVals(ctx context.Context, cc *Credential, authParams authority.AuthParams) (url.Values, error) { + params := url.Values{} + if cc.Secret != "" { + params.Set("client_secret", cc.Secret) + return params, nil + } + + jwt, err := cc.JWT(ctx, authParams) + if err != nil { + return nil, err + } + params.Set("client_assertion", jwt) + params.Set("client_assertion_type", grant.ClientAssertion) + return params, nil +} + +// openid required to get an id token +// offline_access required to get a refresh token +// profile required to get the client_info field back +var detectDefaultScopes = map[string]bool{ + "openid": true, + "offline_access": true, + "profile": true, +} + +var defaultScopes = []string{"openid", "offline_access", "profile"} + +func AppendDefaultScopes(authParameters authority.AuthParams) []string { + scopes := make([]string, 0, len(authParameters.Scopes)+len(defaultScopes)) + for _, scope := range authParameters.Scopes { + s := strings.TrimSpace(scope) + if s == "" { + continue + } + if detectDefaultScopes[scope] { + continue + } + scopes = append(scopes, scope) + } + scopes = append(scopes, defaultScopes...) 
+ return scopes +} + +// addClaims adds client capabilities and claims from AuthParams to the given url.Values +func addClaims(v url.Values, ap authority.AuthParams) error { + claims, err := ap.MergeCapabilitiesAndClaims() + if err == nil && claims != "" { + v.Set("claims", claims) + } + return err +} + +func addScopeQueryParam(queryParams url.Values, authParameters authority.AuthParams) { + scopes := AppendDefaultScopes(authParameters) + queryParams.Set("scope", strings.Join(scopes, " ")) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go new file mode 100644 index 00000000000..3bec4a67cf1 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=AppType"; DO NOT EDIT. + +package accesstokens + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATPublic-1] + _ = x[ATConfidential-2] +} + +const _AppType_name = "ATUnknownATPublicATConfidential" + +var _AppType_index = [...]uint8{0, 9, 17, 31} + +func (i AppType) String() string { + if i < 0 || i >= AppType(len(_AppType_index)-1) { + return "AppType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AppType_name[_AppType_index[i]:_AppType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go new file mode 100644 index 00000000000..b3892bf3f32 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go @@ -0,0 +1,335 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package accesstokens + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "time" + + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// IDToken consists of all the information used to validate a user. +// https://docs.microsoft.com/azure/active-directory/develop/id-tokens . 
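For context (an illustrative sketch, not part of the vendored code): the IDToken unmarshaler that follows works on the middle segment of a compact JWT, which is base64url-encoded without padding per RFC 7515 §2, hence base64.RawURLEncoding. The token string here is a placeholder.

package example

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"strings"
)

// decodeIDTokenClaims extracts the JSON claims from the payload segment of a compact JWT.
func decodeIDTokenClaims(rawJWT string) (map[string]any, error) {
	parts := strings.Split(rawJWT, ".")
	if len(parts) < 2 {
		return nil, errors.New("not a compact JWT")
	}
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return nil, err
	}
	claims := map[string]any{}
	return claims, json.Unmarshal(payload, &claims)
}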
+type IDToken struct { + PreferredUsername string `json:"preferred_username,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + MiddleName string `json:"middle_name,omitempty"` + Name string `json:"name,omitempty"` + Oid string `json:"oid,omitempty"` + TenantID string `json:"tid,omitempty"` + Subject string `json:"sub,omitempty"` + UPN string `json:"upn,omitempty"` + Email string `json:"email,omitempty"` + AlternativeID string `json:"alternative_id,omitempty"` + Issuer string `json:"iss,omitempty"` + Audience string `json:"aud,omitempty"` + ExpirationTime int64 `json:"exp,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + RawToken string + + AdditionalFields map[string]interface{} +} + +var null = []byte("null") + +// UnmarshalJSON implements json.Unmarshaler. +func (i *IDToken) UnmarshalJSON(b []byte) error { + if bytes.Equal(null, b) { + return nil + } + + // Because we have a custom unmarshaler, you + // cannot directly call json.Unmarshal here. If you do, it will call this function + // recursively until reach our recursion limit. We have to create a new type + // that doesn't have this method in order to use json.Unmarshal. + type idToken2 IDToken + + jwt := strings.Trim(string(b), `"`) + jwtArr := strings.Split(jwt, ".") + if len(jwtArr) < 2 { + return errors.New("IDToken returned from server is invalid") + } + + jwtPart := jwtArr[1] + jwtDecoded, err := decodeJWT(jwtPart) + if err != nil { + return fmt.Errorf("unable to unmarshal IDToken, problem decoding JWT: %w", err) + } + + token := idToken2{} + err = json.Unmarshal(jwtDecoded, &token) + if err != nil { + return fmt.Errorf("unable to unmarshal IDToken: %w", err) + } + token.RawToken = jwt + + *i = IDToken(token) + return nil +} + +// IsZero indicates if the IDToken is the zero value. +func (i IDToken) IsZero() bool { + v := reflect.ValueOf(i) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// LocalAccountID extracts an account's local account ID from an ID token. +func (i IDToken) LocalAccountID() string { + if i.Oid != "" { + return i.Oid + } + return i.Subject +} + +// jwtDecoder is provided to allow tests to provide their own. +var jwtDecoder = decodeJWT + +// ClientInfo is used to create a Home Account ID for an account. +type ClientInfo struct { + UID string `json:"uid"` + UTID string `json:"utid"` + + AdditionalFields map[string]interface{} +} + +// UnmarshalJSON implements json.Unmarshaler.s +func (c *ClientInfo) UnmarshalJSON(b []byte) error { + s := strings.Trim(string(b), `"`) + // Client info may be empty in some flows, e.g. certificate exchange. + if len(s) == 0 { + return nil + } + + // Because we have a custom unmarshaler, you + // cannot directly call json.Unmarshal here. If you do, it will call this function + // recursively until reach our recursion limit. We have to create a new type + // that doesn't have this method in order to use json.Unmarshal. 
+ type clientInfo2 ClientInfo + + raw, err := jwtDecoder(s) + if err != nil { + return fmt.Errorf("TokenResponse client_info field had JWT decode error: %w", err) + } + + var c2 clientInfo2 + + err = json.Unmarshal(raw, &c2) + if err != nil { + return fmt.Errorf("was unable to unmarshal decoded JWT in TokenRespone to ClientInfo: %w", err) + } + + *c = ClientInfo(c2) + return nil +} + +// HomeAccountID creates the home account ID. +func (c ClientInfo) HomeAccountID() string { + if c.UID == "" { + return "" + } else if c.UTID == "" { + return fmt.Sprintf("%s.%s", c.UID, c.UID) + } else { + return fmt.Sprintf("%s.%s", c.UID, c.UTID) + } +} + +// Scopes represents scopes in a TokenResponse. +type Scopes struct { + Slice []string +} + +// UnmarshalJSON implements json.Unmarshal. +func (s *Scopes) UnmarshalJSON(b []byte) error { + str := strings.Trim(string(b), `"`) + if len(str) == 0 { + return nil + } + sl := strings.Split(str, " ") + s.Slice = sl + return nil +} + +// TokenResponse is the information that is returned from a token endpoint during a token acquisition flow. +type TokenResponse struct { + authority.OAuthResponseBase + + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + FamilyID string `json:"foci"` + IDToken IDToken `json:"id_token"` + ClientInfo ClientInfo `json:"client_info"` + ExpiresOn internalTime.DurationTime `json:"expires_in"` + ExtExpiresOn internalTime.DurationTime `json:"ext_expires_in"` + GrantedScopes Scopes `json:"scope"` + DeclinedScopes []string // This is derived + + AdditionalFields map[string]interface{} + + scopesComputed bool +} + +// ComputeScope computes the final scopes based on what was granted by the server and +// what our AuthParams were from the authority server. Per OAuth spec, if no scopes are returned, the response should be treated as if all scopes were granted +// This behavior can be observed in client assertion flows, but can happen at any time, this check ensures we treat +// those special responses properly Link to spec: https://tools.ietf.org/html/rfc6749#section-3.3 +func (tr *TokenResponse) ComputeScope(authParams authority.AuthParams) { + if len(tr.GrantedScopes.Slice) == 0 { + tr.GrantedScopes = Scopes{Slice: authParams.Scopes} + } else { + tr.DeclinedScopes = findDeclinedScopes(authParams.Scopes, tr.GrantedScopes.Slice) + } + tr.scopesComputed = true +} + +// Validate validates the TokenResponse has basic valid values. It must be called +// after ComputeScopes() is called. 
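For illustration (not part of the vendored code): the scope rule applied by ComputeScope below follows RFC 6749 §3.3 — an empty "scope" value in the token response means every requested scope was granted, otherwise any requested scope missing from the response is treated as declined. A standalone sketch of that rule:

package example

import "strings"

// grantedAndDeclined mirrors the scope handling in ComputeScope/findDeclinedScopes.
func grantedAndDeclined(requested, returned []string) (granted, declined []string) {
	if len(returned) == 0 {
		// Empty scope in the response: treat all requested scopes as granted.
		return requested, nil
	}
	have := map[string]bool{}
	for _, s := range returned {
		have[strings.ToLower(s)] = true
	}
	for _, r := range requested {
		if !have[strings.ToLower(r)] {
			declined = append(declined, r)
		}
	}
	return returned, declined
}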
+func (tr *TokenResponse) Validate() error { + if tr.Error != "" { + return fmt.Errorf("%s: %s", tr.Error, tr.ErrorDescription) + } + + if tr.AccessToken == "" { + return errors.New("response is missing access_token") + } + + if !tr.scopesComputed { + return fmt.Errorf("TokenResponse hasn't had ScopesComputed() called") + } + return nil +} + +func (tr *TokenResponse) CacheKey(authParams authority.AuthParams) string { + if authParams.AuthorizationType == authority.ATOnBehalfOf { + return authParams.AssertionHash() + } + if authParams.AuthorizationType == authority.ATClientCredentials { + return authParams.AppKey() + } + if authParams.IsConfidentialClient || authParams.AuthorizationType == authority.ATRefreshToken { + return tr.ClientInfo.HomeAccountID() + } + return "" +} + +func findDeclinedScopes(requestedScopes []string, grantedScopes []string) []string { + declined := []string{} + grantedMap := map[string]bool{} + for _, s := range grantedScopes { + grantedMap[strings.ToLower(s)] = true + } + // Comparing the requested scopes with the granted scopes to see if there are any scopes that have been declined. + for _, r := range requestedScopes { + if !grantedMap[strings.ToLower(r)] { + declined = append(declined, r) + } + } + return declined +} + +// decodeJWT decodes a JWT and converts it to a byte array representing a JSON object +// JWT has headers and payload base64url encoded without padding +// https://tools.ietf.org/html/rfc7519#section-3 and +// https://tools.ietf.org/html/rfc7515#section-2 +func decodeJWT(data string) ([]byte, error) { + // https://tools.ietf.org/html/rfc7515#appendix-C + return base64.RawURLEncoding.DecodeString(data) +} + +// RefreshToken is the JSON representation of a MSAL refresh token for encoding to storage. +type RefreshToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + FamilyID string `json:"family_id,omitempty"` + Secret string `json:"secret,omitempty"` + Realm string `json:"realm,omitempty"` + Target string `json:"target,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewRefreshToken is the constructor for RefreshToken. +func NewRefreshToken(homeID, env, clientID, refreshToken, familyID string) RefreshToken { + return RefreshToken{ + HomeAccountID: homeID, + Environment: env, + CredentialType: "RefreshToken", + ClientID: clientID, + FamilyID: familyID, + Secret: refreshToken, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (rt RefreshToken) Key() string { + var fourth = rt.FamilyID + if fourth == "" { + fourth = rt.ClientID + } + + return strings.Join( + []string{rt.HomeAccountID, rt.Environment, rt.CredentialType, fourth}, + shared.CacheKeySeparator, + ) +} + +func (rt RefreshToken) GetSecret() string { + return rt.Secret +} + +// DeviceCodeResult stores the response from the STS device code endpoint. +type DeviceCodeResult struct { + // UserCode is the code the user needs to provide when authentication at the verification URI. + UserCode string + // DeviceCode is the code used in the access token request. + DeviceCode string + // VerificationURL is the the URL where user can authenticate. + VerificationURL string + // ExpiresOn is the expiration time of device code in seconds. 
+ ExpiresOn time.Time + // Interval is the interval at which the STS should be polled at. + Interval int + // Message is the message which should be displayed to the user. + Message string + // ClientID is the UUID issued by the authorization server for your application. + ClientID string + // Scopes is the OpenID scopes used to request access a protected API. + Scopes []string +} + +// NewDeviceCodeResult creates a DeviceCodeResult instance. +func NewDeviceCodeResult(userCode, deviceCode, verificationURL string, expiresOn time.Time, interval int, message, clientID string, scopes []string) DeviceCodeResult { + return DeviceCodeResult{userCode, deviceCode, verificationURL, expiresOn, interval, message, clientID, scopes} +} + +func (dcr DeviceCodeResult) String() string { + return fmt.Sprintf("UserCode: (%v)\nDeviceCode: (%v)\nURL: (%v)\nMessage: (%v)\n", dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, dcr.Message) + +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go new file mode 100644 index 00000000000..7b2ccb4f5d2 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -0,0 +1,552 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package authority + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path" + "strings" + "time" + + "github.com/google/uuid" +) + +const ( + authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize" + instanceDiscoveryEndpoint = "https://%v/common/discovery/instance" + tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration" + regionName = "REGION_NAME" + defaultAPIVersion = "2021-10-01" + imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion + autoDetectRegion = "TryAutoDetect" +) + +// These are various hosts that host AAD Instance discovery endpoints. +const ( + defaultHost = "login.microsoftonline.com" + loginMicrosoft = "login.microsoft.com" + loginWindows = "login.windows.net" + loginSTSWindows = "sts.windows.net" + loginMicrosoftOnline = defaultHost +) + +// jsonCaller is an interface that allows us to mock the JSONCall method. +type jsonCaller interface { + JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error +} + +var aadTrustedHostList = map[string]bool{ + "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list + "login.chinacloudapi.cn": true, // Microsoft Azure China + "login.microsoftonline.de": true, // Microsoft Azure Blackforest + "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy + "login.microsoftonline.us": true, // Microsoft Azure US Government + "login.microsoftonline.com": true, // Microsoft Azure Worldwide + "login.cloudgovapi.us": true, // Microsoft Azure US Government +} + +// TrustedHost checks if an AAD host is trusted/valid. +func TrustedHost(host string) bool { + if _, ok := aadTrustedHostList[host]; ok { + return true + } + return false +} + +// OAuthResponseBase is the base JSON return message for an OAuth call. +// This is embedded in other calls to get the base fields from every response. 
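For reference (an illustrative sketch, not part of the vendored code): a device-code poller classifies the error body the same way isWaitDeviceCodeErr does earlier in this diff — "authorization_pending" and "slow_down" mean keep polling (RFC 8628 §3.5), anything else is terminal.

package example

import "encoding/json"

// shouldKeepPolling reports whether a device-code token error body indicates the user
// has not finished signing in yet and polling should continue.
func shouldKeepPolling(body []byte) bool {
	var e struct {
		Error string `json:"error"`
	}
	if err := json.Unmarshal(body, &e); err != nil {
		return false
	}
	return e.Error == "authorization_pending" || e.Error == "slow_down"
}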
+type OAuthResponseBase struct { + Error string `json:"error"` + SubError string `json:"suberror"` + ErrorDescription string `json:"error_description"` + ErrorCodes []int `json:"error_codes"` + CorrelationID string `json:"correlation_id"` + Claims string `json:"claims"` +} + +// TenantDiscoveryResponse is the tenant endpoints from the OpenID configuration endpoint. +type TenantDiscoveryResponse struct { + OAuthResponseBase + + AuthorizationEndpoint string `json:"authorization_endpoint"` + TokenEndpoint string `json:"token_endpoint"` + Issuer string `json:"issuer"` + + AdditionalFields map[string]interface{} +} + +// Validate validates that the response had the correct values required. +func (r *TenantDiscoveryResponse) Validate() error { + switch "" { + case r.AuthorizationEndpoint: + return errors.New("TenantDiscoveryResponse: authorize endpoint was not found in the openid configuration") + case r.TokenEndpoint: + return errors.New("TenantDiscoveryResponse: token endpoint was not found in the openid configuration") + case r.Issuer: + return errors.New("TenantDiscoveryResponse: issuer was not found in the openid configuration") + } + return nil +} + +type InstanceDiscoveryMetadata struct { + PreferredNetwork string `json:"preferred_network"` + PreferredCache string `json:"preferred_cache"` + Aliases []string `json:"aliases"` + + AdditionalFields map[string]interface{} +} + +type InstanceDiscoveryResponse struct { + TenantDiscoveryEndpoint string `json:"tenant_discovery_endpoint"` + Metadata []InstanceDiscoveryMetadata `json:"metadata"` + + AdditionalFields map[string]interface{} +} + +//go:generate stringer -type=AuthorizeType + +// AuthorizeType represents the type of token flow. +type AuthorizeType int + +// These are all the types of token flows. +const ( + ATUnknown AuthorizeType = iota + ATUsernamePassword + ATWindowsIntegrated + ATAuthCode + ATInteractive + ATClientCredentials + ATDeviceCode + ATRefreshToken + AccountByID + ATOnBehalfOf +) + +// These are all authority types +const ( + AAD = "MSSTS" + ADFS = "ADFS" +) + +// AuthParams represents the parameters used for authorization for token acquisition. +type AuthParams struct { + AuthorityInfo Info + CorrelationID string + Endpoints Endpoints + ClientID string + // Redirecturi is used for auth flows that specify a redirect URI (e.g. local server for interactive auth flow). + Redirecturi string + HomeAccountID string + // Username is the user-name portion for username/password auth flow. + Username string + // Password is the password portion for username/password auth flow. + Password string + // Scopes is the list of scopes the user consents to. + Scopes []string + // AuthorizationType specifies the auth flow being used. + AuthorizationType AuthorizeType + // State is a random value used to prevent cross-site request forgery attacks. + State string + // CodeChallenge is derived from a code verifier and is sent in the auth request. + CodeChallenge string + // CodeChallengeMethod describes the method used to create the CodeChallenge. + CodeChallengeMethod string + // Prompt specifies the user prompt type during interactive auth. + Prompt string + // IsConfidentialClient specifies if it is a confidential client. + IsConfidentialClient bool + // SendX5C specifies if x5c claim(public key of the certificate) should be sent to STS. + SendX5C bool + // UserAssertion is the access token used to acquire token on behalf of user + UserAssertion string + // Capabilities the client will include with each token request, for example "CP1". 
+ // Call [NewClientCapabilities] to construct a value for this field. + Capabilities ClientCapabilities + // Claims required for an access token to satisfy a conditional access policy + Claims string + // KnownAuthorityHosts don't require metadata discovery because they're known to the user + KnownAuthorityHosts []string + // LoginHint is a username with which to pre-populate account selection during interactive auth + LoginHint string + // DomainHint is a directive that can be used to accelerate the user to their federated IdP sign-in page + DomainHint string +} + +// NewAuthParams creates an authorization parameters object. +func NewAuthParams(clientID string, authorityInfo Info) AuthParams { + return AuthParams{ + ClientID: clientID, + AuthorityInfo: authorityInfo, + CorrelationID: uuid.New().String(), + } +} + +// WithTenant returns a copy of the AuthParams having the specified tenant ID. If the given +// ID is empty, the copy is identical to the original. This function returns an error in +// several cases: +// - ID isn't specific (for example, it's "common") +// - ID is non-empty and the authority doesn't support tenants (for example, it's an ADFS authority) +// - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint +// - the resulting authority URL is invalid +func (p AuthParams) WithTenant(ID string) (AuthParams, error) { + switch ID { + case "", p.AuthorityInfo.Tenant: + // keep the default tenant because the caller didn't override it + return p, nil + case "common", "consumers", "organizations": + if p.AuthorityInfo.AuthorityType == AAD { + return p, fmt.Errorf(`tenant ID must be a specific tenant, not "%s"`, ID) + } + // else we'll return a better error below + } + if p.AuthorityInfo.AuthorityType != AAD { + return p, errors.New("the authority doesn't support tenants") + } + if p.AuthorityInfo.Tenant == "consumers" { + return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`) + } + authority := "https://" + path.Join(p.AuthorityInfo.Host, ID) + info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled) + if err == nil { + info.Region = p.AuthorityInfo.Region + p.AuthorityInfo = info + } + return p, err +} + +// MergeCapabilitiesAndClaims combines client capabilities and challenge claims into a value suitable for an authentication request's "claims" parameter. +func (p AuthParams) MergeCapabilitiesAndClaims() (string, error) { + claims := p.Claims + if len(p.Capabilities.asMap) > 0 { + if claims == "" { + // without claims the result is simply the capabilities + return p.Capabilities.asJSON, nil + } + // Otherwise, merge claims and capabilties into a single JSON object. + // We handle the claims challenge as a map because we don't know its structure. + var challenge map[string]any + if err := json.Unmarshal([]byte(claims), &challenge); err != nil { + return "", fmt.Errorf(`claims must be JSON. Are they base64 encoded? json.Unmarshal returned "%v"`, err) + } + if err := merge(p.Capabilities.asMap, challenge); err != nil { + return "", err + } + b, err := json.Marshal(challenge) + if err != nil { + return "", err + } + claims = string(b) + } + return claims, nil +} + +// merges a into b without overwriting b's values. Returns an error when a and b share a key for which either has a non-object value. 
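For illustration (not part of the vendored code): a simplified, non-recursive sketch of the direction of the claims/capabilities merge performed by MergeCapabilitiesAndClaims — capability keys are folded into the challenge without overwriting values already present. The vendored merge() that follows additionally recurses into nested objects and rejects conflicting non-object values; the JSON inputs here are placeholders.

package example

import (
	"encoding/json"
	"fmt"
)

// mergeClaims folds the capabilities object into the challenge object, keeping challenge values.
func mergeClaims(capabilitiesJSON, challengeJSON string) (string, error) {
	var caps, challenge map[string]any
	if err := json.Unmarshal([]byte(capabilitiesJSON), &caps); err != nil {
		return "", fmt.Errorf("capabilities must be JSON: %w", err)
	}
	if err := json.Unmarshal([]byte(challengeJSON), &challenge); err != nil {
		return "", fmt.Errorf("claims challenge must be JSON: %w", err)
	}
	for k, v := range caps {
		if _, exists := challenge[k]; !exists {
			challenge[k] = v
		}
	}
	b, err := json.Marshal(challenge)
	return string(b), err
}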
+func merge(a, b map[string]any) error { + for k, av := range a { + if bv, ok := b[k]; !ok { + // b doesn't contain this key => simply set it to a's value + b[k] = av + } else { + // b does contain this key => recursively merge a[k] into b[k], provided both are maps. If a[k] or b[k] isn't + // a map, return an error because merging would overwrite some value in b. Errors shouldn't occur in practice + // because the challenge will be from AAD, which knows the capabilities format. + if A, ok := av.(map[string]any); ok { + if B, ok := bv.(map[string]any); ok { + return merge(A, B) + } else { + // b[k] isn't a map + return errors.New("challenge claims conflict with client capabilities") + } + } else { + // a[k] isn't a map + return errors.New("challenge claims conflict with client capabilities") + } + } + } + return nil +} + +// ClientCapabilities stores capabilities in the formats used by AuthParams.MergeCapabilitiesAndClaims. +// [NewClientCapabilities] precomputes these representations because capabilities are static for the +// lifetime of a client and are included with every authentication request i.e., these computations +// always have the same result and would otherwise have to be repeated for every request. +type ClientCapabilities struct { + // asJSON is for the common case: adding the capabilities to an auth request with no challenge claims + asJSON string + // asMap is for merging the capabilities with challenge claims + asMap map[string]any +} + +func NewClientCapabilities(capabilities []string) (ClientCapabilities, error) { + c := ClientCapabilities{} + var err error + if len(capabilities) > 0 { + cpbs := make([]string, len(capabilities)) + for i := 0; i < len(cpbs); i++ { + cpbs[i] = fmt.Sprintf(`"%s"`, capabilities[i]) + } + c.asJSON = fmt.Sprintf(`{"access_token":{"xms_cc":{"values":[%s]}}}`, strings.Join(cpbs, ",")) + // note our JSON is valid but we can't stop users breaking it with garbage like "}" + err = json.Unmarshal([]byte(c.asJSON), &c.asMap) + } + return c, err +} + +// Info consists of information about the authority. +type Info struct { + Host string + CanonicalAuthorityURI string + AuthorityType string + UserRealmURIPrefix string + ValidateAuthority bool + Tenant string + Region string + InstanceDiscoveryDisabled bool +} + +func firstPathSegment(u *url.URL) (string, error) { + pathParts := strings.Split(u.EscapedPath(), "/") + if len(pathParts) >= 2 { + return pathParts[1], nil + } + + return "", errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) +} + +// NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided. 
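For context (an illustrative sketch, not part of the vendored code): authority URLs are split into a host and a tenant, where the tenant is the first path segment, mirroring firstPathSegment above. The example URL is a placeholder.

package example

import (
	"errors"
	"net/url"
	"strings"
)

// splitAuthority extracts the host and tenant from an authority URL such as
// "https://login.microsoftonline.com/contoso.onmicrosoft.com".
func splitAuthority(authority string) (host, tenant string, err error) {
	u, err := url.Parse(strings.ToLower(authority))
	if err != nil || u.Scheme != "https" {
		return "", "", errors.New(`authority must be an https URL`)
	}
	parts := strings.Split(u.EscapedPath(), "/")
	if len(parts) < 2 || parts[1] == "" {
		return "", "", errors.New("authority URL is missing a tenant path segment")
	}
	return u.Host, parts[1], nil
}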
+func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) { + u, err := url.Parse(strings.ToLower(authority)) + if err != nil || u.Scheme != "https" { + return Info{}, errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) + } + + tenant, err := firstPathSegment(u) + if err != nil { + return Info{}, err + } + authorityType := AAD + if tenant == "adfs" { + authorityType = ADFS + } + + // u.Host includes the port, if any, which is required for private cloud deployments + return Info{ + Host: u.Host, + CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Host, tenant), + AuthorityType: authorityType, + UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()), + ValidateAuthority: validateAuthority, + Tenant: tenant, + InstanceDiscoveryDisabled: instanceDiscoveryDisabled, + }, nil +} + +// Endpoints consists of the endpoints from the tenant discovery response. +type Endpoints struct { + AuthorizationEndpoint string + TokenEndpoint string + selfSignedJwtAudience string + authorityHost string +} + +// NewEndpoints creates an Endpoints object. +func NewEndpoints(authorizationEndpoint string, tokenEndpoint string, selfSignedJwtAudience string, authorityHost string) Endpoints { + return Endpoints{authorizationEndpoint, tokenEndpoint, selfSignedJwtAudience, authorityHost} +} + +// UserRealmAccountType refers to the type of user realm. +type UserRealmAccountType string + +// These are the different types of user realms. +const ( + Unknown UserRealmAccountType = "" + Federated UserRealmAccountType = "Federated" + Managed UserRealmAccountType = "Managed" +) + +// UserRealm is used for the username password request to determine user type +type UserRealm struct { + AccountType UserRealmAccountType `json:"account_type"` + DomainName string `json:"domain_name"` + CloudInstanceName string `json:"cloud_instance_name"` + CloudAudienceURN string `json:"cloud_audience_urn"` + + // required if accountType is Federated + FederationProtocol string `json:"federation_protocol"` + FederationMetadataURL string `json:"federation_metadata_url"` + + AdditionalFields map[string]interface{} +} + +func (u UserRealm) validate() error { + switch "" { + case string(u.AccountType): + return errors.New("the account type (Federated or Managed) is missing") + case u.DomainName: + return errors.New("domain name of user realm is missing") + case u.CloudInstanceName: + return errors.New("cloud instance name of user realm is missing") + case u.CloudAudienceURN: + return errors.New("cloud Instance URN is missing") + } + + if u.AccountType == Federated { + switch "" { + case u.FederationProtocol: + return errors.New("federation protocol of user realm is missing") + case u.FederationMetadataURL: + return errors.New("federation metadata URL of user realm is missing") + } + } + return nil +} + +// Client represents the REST calls to authority backends. +type Client struct { + // Comm provides the HTTP transport client. 
+ Comm jsonCaller // *comm.Client +} + +func (c Client) UserRealm(ctx context.Context, authParams AuthParams) (UserRealm, error) { + endpoint := fmt.Sprintf("https://%s/common/UserRealm/%s", authParams.Endpoints.authorityHost, url.PathEscape(authParams.Username)) + qv := url.Values{ + "api-version": []string{"1.0"}, + } + + resp := UserRealm{} + err := c.Comm.JSONCall( + ctx, + endpoint, + http.Header{"client-request-id": []string{authParams.CorrelationID}}, + qv, + nil, + &resp, + ) + if err != nil { + return resp, err + } + + return resp, resp.validate() +} + +func (c Client) GetTenantDiscoveryResponse(ctx context.Context, openIDConfigurationEndpoint string) (TenantDiscoveryResponse, error) { + resp := TenantDiscoveryResponse{} + err := c.Comm.JSONCall( + ctx, + openIDConfigurationEndpoint, + http.Header{}, + nil, + nil, + &resp, + ) + + return resp, err +} + +// AADInstanceDiscovery attempts to discover a tenant endpoint (used in OIDC auth with an authorization endpoint). +// This is done by AAD which allows for aliasing of tenants (windows.sts.net is the same as login.windows.com). +func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (InstanceDiscoveryResponse, error) { + region := "" + var err error + resp := InstanceDiscoveryResponse{} + if authorityInfo.Region != "" && authorityInfo.Region != autoDetectRegion { + region = authorityInfo.Region + } else if authorityInfo.Region == autoDetectRegion { + region = detectRegion(ctx) + } + if region != "" { + environment := authorityInfo.Host + switch environment { + case loginMicrosoft, loginWindows, loginSTSWindows, defaultHost: + environment = loginMicrosoft + } + + resp.TenantDiscoveryEndpoint = fmt.Sprintf(tenantDiscoveryEndpointWithRegion, region, environment, authorityInfo.Tenant) + metadata := InstanceDiscoveryMetadata{ + PreferredNetwork: fmt.Sprintf("%v.%v", region, authorityInfo.Host), + PreferredCache: authorityInfo.Host, + Aliases: []string{fmt.Sprintf("%v.%v", region, authorityInfo.Host), authorityInfo.Host}, + } + resp.Metadata = []InstanceDiscoveryMetadata{metadata} + } else { + qv := url.Values{} + qv.Set("api-version", "1.1") + qv.Set("authorization_endpoint", fmt.Sprintf(authorizationEndpoint, authorityInfo.Host, authorityInfo.Tenant)) + + discoveryHost := defaultHost + if TrustedHost(authorityInfo.Host) { + discoveryHost = authorityInfo.Host + } + + endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost) + err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp) + } + return resp, err +} + +func detectRegion(ctx context.Context) string { + region := os.Getenv(regionName) + if region != "" { + region = strings.ReplaceAll(region, " ", "") + return strings.ToLower(region) + } + // HTTP call to IMDS endpoint to get region + // Refer : https://identitydivision.visualstudio.com/DevEx/_git/AuthLibrariesApiReview?path=%2FPinAuthToRegion%2FAAD%20SDK%20Proposal%20to%20Pin%20Auth%20to%20region.md&_a=preview&version=GBdev + // Set a 2 second timeout for this http client which only does calls to IMDS endpoint + client := http.Client{ + Timeout: time.Duration(2 * time.Second), + } + req, _ := http.NewRequest("GET", imdsEndpoint, nil) + req.Header.Set("Metadata", "true") + resp, err := client.Do(req) + // If the request times out or there is an error, it is retried once + if err != nil || resp.StatusCode != 200 { + resp, err = client.Do(req) + if err != nil || resp.StatusCode != 200 { + return "" + } + } + defer resp.Body.Close() + response, err := io.ReadAll(resp.Body) + if err != nil { + 
return "" + } + return string(response) +} + +func (a *AuthParams) CacheKey(isAppCache bool) string { + if a.AuthorizationType == ATOnBehalfOf { + return a.AssertionHash() + } + if a.AuthorizationType == ATClientCredentials || isAppCache { + return a.AppKey() + } + if a.AuthorizationType == ATRefreshToken || a.AuthorizationType == AccountByID { + return a.HomeAccountID + } + return "" +} +func (a *AuthParams) AssertionHash() string { + hasher := sha256.New() + // Per documentation this never returns an error : https://pkg.go.dev/hash#pkg-types + _, _ = hasher.Write([]byte(a.UserAssertion)) + sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + return sha +} + +func (a *AuthParams) AppKey() string { + if a.AuthorityInfo.Tenant != "" { + return fmt.Sprintf("%s_%s_AppTokenCache", a.ClientID, a.AuthorityInfo.Tenant) + } + return fmt.Sprintf("%s__AppTokenCache", a.ClientID) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go new file mode 100644 index 00000000000..10039773b06 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=AuthorizeType"; DO NOT EDIT. + +package authority + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATUsernamePassword-1] + _ = x[ATWindowsIntegrated-2] + _ = x[ATAuthCode-3] + _ = x[ATInteractive-4] + _ = x[ATClientCredentials-5] + _ = x[ATDeviceCode-6] + _ = x[ATRefreshToken-7] +} + +const _AuthorizeType_name = "ATUnknownATUsernamePasswordATWindowsIntegratedATAuthCodeATInteractiveATClientCredentialsATDeviceCodeATRefreshToken" + +var _AuthorizeType_index = [...]uint8{0, 9, 27, 46, 56, 69, 88, 100, 114} + +func (i AuthorizeType) String() string { + if i < 0 || i >= AuthorizeType(len(_AuthorizeType_index)-1) { + return "AuthorizeType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AuthorizeType_name[_AuthorizeType_index[i]:_AuthorizeType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go new file mode 100644 index 00000000000..7d9ec7cd374 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go @@ -0,0 +1,320 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package comm provides helpers for communicating with HTTP backends. +package comm + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "runtime" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version" + "github.com/google/uuid" +) + +// HTTPClient represents an HTTP client. +// It's usually an *http.Client from the standard library. 
+type HTTPClient interface { + // Do sends an HTTP request and returns an HTTP response. + Do(req *http.Request) (*http.Response, error) + + // CloseIdleConnections closes any idle connections in a "keep-alive" state. + CloseIdleConnections() +} + +// Client provides a wrapper to our *http.Client that handles compression and serialization needs. +type Client struct { + client HTTPClient +} + +// New returns a new Client object. +func New(httpClient HTTPClient) *Client { + if httpClient == nil { + panic("http.Client cannot == nil") + } + + return &Client{client: httpClient} +} + +// JSONCall connects to the REST endpoint passing the HTTP query values, headers and JSON conversion +// of body in the HTTP body. It automatically handles compression and decompression with gzip. The response is JSON +// unmarshalled into resp. resp must be a pointer to a struct. If the body struct contains a field called +// "AdditionalFields" we use a custom marshal/unmarshal engine. +func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error { + if qv == nil { + qv = url.Values{} + } + + v := reflect.ValueOf(resp) + if err := c.checkResp(v); err != nil { + return err + } + + // Choose a JSON marshal/unmarshal depending on if we have AdditionalFields attribute. + var marshal = json.Marshal + var unmarshal = json.Unmarshal + if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok { + marshal = customJSON.Marshal + unmarshal = customJSON.Unmarshal + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + addStdHeaders(headers) + + req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} + + if body != nil { + // Note: In case your wondering why we are not gzip encoding.... + // I'm not sure if these various services support gzip on send. + headers.Add("Content-Type", "application/json; charset=utf-8") + data, err := marshal(body) + if err != nil { + return fmt.Errorf("bug: conn.Call(): could not marshal the body object: %w", err) + } + req.Body = io.NopCloser(bytes.NewBuffer(data)) + req.Method = http.MethodPost + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + if resp != nil { + if err := unmarshal(data, resp); err != nil { + return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data)) + } + } + return nil +} + +// XMLCall connects to an endpoint and decodes the XML response into resp. This is used when +// sending application/xml . If sending XML via SOAP, use SOAPCall(). +func (c *Client) XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error { + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + if qv == nil { + qv = url.Values{} + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + headers.Set("Content-Type", "application/xml; charset=utf-8") // This was not set in he original Mex(), but... + addStdHeaders(headers) + + return c.xmlCall(ctx, u, headers, "", resp) +} + +// SOAPCall returns the SOAP message given an endpoint, action, body of the request and the response object to marshal into. 
+func (c *Client) SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error { + if body == "" { + return fmt.Errorf("cannot make a SOAP call with body set to empty string") + } + + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + if qv == nil { + qv = url.Values{} + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + headers.Set("Content-Type", "application/soap+xml; charset=utf-8") + headers.Set("SOAPAction", action) + addStdHeaders(headers) + + return c.xmlCall(ctx, u, headers, body, resp) +} + +// xmlCall sends an XML in body and decodes into resp. This simply does the transport and relies on +// an upper level call to set things such as SOAP parameters and Content-Type, if required. +func (c *Client) xmlCall(ctx context.Context, u *url.URL, headers http.Header, body string, resp interface{}) error { + req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} + + if len(body) > 0 { + req.Method = http.MethodPost + req.Body = io.NopCloser(strings.NewReader(body)) + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + return xml.Unmarshal(data, resp) +} + +// URLFormCall is used to make a call where we need to send application/x-www-form-urlencoded data +// to the backend and receive JSON back. qv will be encoded into the request body. +func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error { + if len(qv) == 0 { + return fmt.Errorf("URLFormCall() requires qv to have non-zero length") + } + + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + + headers := http.Header{} + headers.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + addStdHeaders(headers) + + enc := qv.Encode() + + req := &http.Request{ + Method: http.MethodPost, + URL: u, + Header: headers, + ContentLength: int64(len(enc)), + Body: io.NopCloser(strings.NewReader(enc)), + GetBody: func() (io.ReadCloser, error) { + return io.NopCloser(strings.NewReader(enc)), nil + }, + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + v := reflect.ValueOf(resp) + if err := c.checkResp(v); err != nil { + return err + } + + var unmarshal = json.Unmarshal + if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok { + unmarshal = customJSON.Unmarshal + } + if resp != nil { + if err := unmarshal(data, resp); err != nil { + return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data)) + } + } + return nil +} + +// do makes the HTTP call to the server and returns the contents of the body. 
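For illustration (not part of the vendored code): the form-encoded POST built by URLFormCall above sets GetBody so the body can be replayed on redirects or retries; a minimal standard-library sketch of the same pattern (http.NewRequest already populates GetBody for a *strings.Reader, shown explicitly here to mirror URLFormCall). The endpoint is a placeholder.

package example

import (
	"io"
	"net/http"
	"net/url"
	"strings"
)

// newFormRequest builds a POST request carrying url-encoded values with a replayable body.
func newFormRequest(endpoint string, qv url.Values) (*http.Request, error) {
	enc := qv.Encode()
	req, err := http.NewRequest(http.MethodPost, endpoint, strings.NewReader(enc))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
	req.GetBody = func() (io.ReadCloser, error) {
		return io.NopCloser(strings.NewReader(enc)), nil
	}
	return req, nil
}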
+func (c *Client) do(ctx context.Context, req *http.Request) ([]byte, error) { + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 30*time.Second) + defer cancel() + } + req = req.WithContext(ctx) + + reply, err := c.client.Do(req) + if err != nil { + return nil, fmt.Errorf("server response error:\n %w", err) + } + defer reply.Body.Close() + + data, err := c.readBody(reply) + if err != nil { + return nil, fmt.Errorf("could not read the body of an HTTP Response: %w", err) + } + reply.Body = io.NopCloser(bytes.NewBuffer(data)) + + // NOTE: This doesn't happen immediately after the call so that we can get an error message + // from the server and include it in our error. + switch reply.StatusCode { + case 200, 201: + default: + sd := strings.TrimSpace(string(data)) + if sd != "" { + // We probably have the error in the body. + return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, sd), + } + } + return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, reply.StatusCode), + } + } + + return data, nil +} + +// checkResp checks a response object o make sure it is a pointer to a struct. +func (c *Client) checkResp(v reflect.Value) error { + if v.Kind() != reflect.Ptr { + return fmt.Errorf("bug: resp argument must a *struct, was %T", v.Interface()) + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return fmt.Errorf("bug: resp argument must be a *struct, was %T", v.Interface()) + } + return nil +} + +// readBody reads the body out of an *http.Response. It supports gzip encoded responses. +func (c *Client) readBody(resp *http.Response) ([]byte, error) { + var reader io.Reader = resp.Body + switch resp.Header.Get("Content-Encoding") { + case "": + // Do nothing + case "gzip": + reader = gzipDecompress(resp.Body) + default: + return nil, fmt.Errorf("bug: comm.Client.JSONCall(): content was send with unsupported content-encoding %s", resp.Header.Get("Content-Encoding")) + } + return io.ReadAll(reader) +} + +var testID string + +// addStdHeaders adds the standard headers we use on all calls. +func addStdHeaders(headers http.Header) http.Header { + headers.Set("Accept-Encoding", "gzip") + // So that I can have a static id for tests. + if testID != "" { + headers.Set("client-request-id", testID) + headers.Set("Return-Client-Request-Id", "false") + } else { + headers.Set("client-request-id", uuid.New().String()) + headers.Set("Return-Client-Request-Id", "false") + } + headers.Set("x-client-sku", "MSAL.Go") + headers.Set("x-client-os", runtime.GOOS) + headers.Set("x-client-cpu", runtime.GOARCH) + headers.Set("x-client-ver", version.Version) + return headers +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go new file mode 100644 index 00000000000..4d3dbfcf0a6 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package comm + +import ( + "compress/gzip" + "io" +) + +func gzipDecompress(r io.Reader) io.Reader { + gzipReader, _ := gzip.NewReader(r) + + pipeOut, pipeIn := io.Pipe() + go func() { + // decompression bomb would have to come from Azure services. + // If we want to limit, we should do that in comm.do(). + _, err := io.Copy(pipeIn, gzipReader) //nolint + if err != nil { + // don't need the error. + pipeIn.CloseWithError(err) //nolint + gzipReader.Close() + return + } + if err := gzipReader.Close(); err != nil { + // don't need the error. + pipeIn.CloseWithError(err) //nolint + return + } + pipeIn.Close() + }() + return pipeOut +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go new file mode 100644 index 00000000000..b628f61ac08 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package grant holds types of grants issued by authorization services. +package grant + +const ( + Password = "password" + JWT = "urn:ietf:params:oauth:grant-type:jwt-bearer" + SAMLV1 = "urn:ietf:params:oauth:grant-type:saml1_1-bearer" + SAMLV2 = "urn:ietf:params:oauth:grant-type:saml2-bearer" + DeviceCode = "device_code" + AuthCode = "authorization_code" + RefreshToken = "refresh_token" + ClientCredential = "client_credentials" + ClientAssertion = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" +) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go new file mode 100644 index 00000000000..1f9c543fa3b --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go @@ -0,0 +1,56 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package ops provides operations to various backend services using REST clients. + +The REST type provides several clients that can be used to communicate to backends. +Usage is simple: + + rest := ops.New() + + // Creates an authority client and calls the UserRealm() method. + userRealm, err := rest.Authority().UserRealm(ctx, authParameters) + if err != nil { + // Do something + } +*/ +package ops + +import ( + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" +) + +// HTTPClient represents an HTTP client. +// It's usually an *http.Client from the standard library. +type HTTPClient = comm.HTTPClient + +// REST provides REST clients for communicating with various backends used by MSAL. +type REST struct { + client *comm.Client +} + +// New is the constructor for REST. +func New(httpClient HTTPClient) *REST { + return &REST{client: comm.New(httpClient)} +} + +// Authority returns a client for querying information about various authorities. 
+func (r *REST) Authority() authority.Client { + return authority.Client{Comm: r.client} +} + +// AccessTokens returns a client that can be used to get various access tokens for +// authorization purposes. +func (r *REST) AccessTokens() accesstokens.Client { + return accesstokens.Client{Comm: r.client} +} + +// WSTrust provides access to various metadata in a WSTrust service. This data can +// be used to gain tokens based on SAML data using the client provided by AccessTokens(). +func (r *REST) WSTrust() wstrust.Client { + return wstrust.Client{Comm: r.client} +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go new file mode 100644 index 00000000000..a2bb6278ae5 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=endpointType"; DO NOT EDIT. + +package defs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[etUnknown-0] + _ = x[etUsernamePassword-1] + _ = x[etWindowsTransport-2] +} + +const _endpointType_name = "etUnknownetUsernamePasswordetWindowsTransport" + +var _endpointType_index = [...]uint8{0, 9, 27, 45} + +func (i endpointType) String() string { + if i < 0 || i >= endpointType(len(_endpointType_index)-1) { + return "endpointType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _endpointType_name[_endpointType_index[i]:_endpointType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go new file mode 100644 index 00000000000..6497270028d --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go @@ -0,0 +1,394 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
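For reference, the gzipDecompress helper in compress.go above streams decompression through an io.Pipe so callers read plain bytes without buffering the whole response. A self-contained sketch of the same pipe-based approach (with the gzip.NewReader error additionally surfaced; names here are illustrative, standard library only):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// streamGunzip decompresses in a goroutine; any error reaches the reader
// through CloseWithError, and a nil error closes the pipe normally.
func streamGunzip(r io.Reader) io.Reader {
	pr, pw := io.Pipe()
	gz, err := gzip.NewReader(r)
	if err != nil {
		pw.CloseWithError(err)
		return pr
	}
	go func() {
		_, err := io.Copy(pw, gz)
		gz.Close()
		pw.CloseWithError(err)
	}()
	return pr
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("hello gzip"))
	zw.Close()

	out, _ := io.ReadAll(streamGunzip(&buf))
	fmt.Println(string(out))
}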
+ +package defs + +import "encoding/xml" + +type Definitions struct { + XMLName xml.Name `xml:"definitions"` + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + TargetNamespace string `xml:"targetNamespace,attr"` + WSDL string `xml:"wsdl,attr"` + XSD string `xml:"xsd,attr"` + T string `xml:"t,attr"` + SOAPENC string `xml:"soapenc,attr"` + SOAP string `xml:"soap,attr"` + TNS string `xml:"tns,attr"` + MSC string `xml:"msc,attr"` + WSAM string `xml:"wsam,attr"` + SOAP12 string `xml:"soap12,attr"` + WSA10 string `xml:"wsa10,attr"` + WSA string `xml:"wsa,attr"` + WSAW string `xml:"wsaw,attr"` + WSX string `xml:"wsx,attr"` + WSAP string `xml:"wsap,attr"` + WSU string `xml:"wsu,attr"` + Trust string `xml:"trust,attr"` + WSP string `xml:"wsp,attr"` + Policy []Policy `xml:"Policy"` + Types Types `xml:"types"` + Message []Message `xml:"message"` + PortType []PortType `xml:"portType"` + Binding []Binding `xml:"binding"` + Service Service `xml:"service"` +} + +type Policy struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + ExactlyOne ExactlyOne `xml:"ExactlyOne"` +} + +type ExactlyOne struct { + Text string `xml:",chardata"` + All All `xml:"All"` +} + +type All struct { + Text string `xml:",chardata"` + NegotiateAuthentication NegotiateAuthentication `xml:"NegotiateAuthentication"` + TransportBinding TransportBinding `xml:"TransportBinding"` + UsingAddressing Text `xml:"UsingAddressing"` + EndorsingSupportingTokens EndorsingSupportingTokens `xml:"EndorsingSupportingTokens"` + WSS11 WSS11 `xml:"Wss11"` + Trust10 Trust10 `xml:"Trust10"` + SignedSupportingTokens SignedSupportingTokens `xml:"SignedSupportingTokens"` + Trust13 WSTrust13 `xml:"Trust13"` + SignedEncryptedSupportingTokens SignedEncryptedSupportingTokens `xml:"SignedEncryptedSupportingTokens"` +} + +type NegotiateAuthentication struct { + Text string `xml:",chardata"` + HTTP string `xml:"http,attr"` + XMLName xml.Name +} + +type TransportBinding struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy TransportBindingPolicy `xml:"Policy"` +} + +type TransportBindingPolicy struct { + Text string `xml:",chardata"` + TransportToken TransportToken `xml:"TransportToken"` + AlgorithmSuite AlgorithmSuite `xml:"AlgorithmSuite"` + Layout Layout `xml:"Layout"` + IncludeTimestamp Text `xml:"IncludeTimestamp"` +} + +type TransportToken struct { + Text string `xml:",chardata"` + Policy TransportTokenPolicy `xml:"Policy"` +} + +type TransportTokenPolicy struct { + Text string `xml:",chardata"` + HTTPSToken HTTPSToken `xml:"HttpsToken"` +} + +type HTTPSToken struct { + Text string `xml:",chardata"` + RequireClientCertificate string `xml:"RequireClientCertificate,attr"` +} + +type AlgorithmSuite struct { + Text string `xml:",chardata"` + Policy AlgorithmSuitePolicy `xml:"Policy"` +} + +type AlgorithmSuitePolicy struct { + Text string `xml:",chardata"` + Basic256 Text `xml:"Basic256"` + Basic128 Text `xml:"Basic128"` +} + +type Layout struct { + Text string `xml:",chardata"` + Policy LayoutPolicy `xml:"Policy"` +} + +type LayoutPolicy struct { + Text string `xml:",chardata"` + Strict Text `xml:"Strict"` +} + +type EndorsingSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy EndorsingSupportingTokensPolicy `xml:"Policy"` +} + +type EndorsingSupportingTokensPolicy struct { + Text string `xml:",chardata"` + X509Token X509Token `xml:"X509Token"` + RSAToken RSAToken `xml:"RsaToken"` + SignedParts SignedParts `xml:"SignedParts"` + KerberosToken KerberosToken 
`xml:"KerberosToken"` + IssuedToken IssuedToken `xml:"IssuedToken"` + KeyValueToken KeyValueToken `xml:"KeyValueToken"` +} + +type X509Token struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy X509TokenPolicy `xml:"Policy"` +} + +type X509TokenPolicy struct { + Text string `xml:",chardata"` + RequireThumbprintReference Text `xml:"RequireThumbprintReference"` + WSSX509V3Token10 Text `xml:"WssX509V3Token10"` +} + +type RSAToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` + MSSP string `xml:"mssp,attr"` +} + +type SignedParts struct { + Text string `xml:",chardata"` + Header SignedPartsHeader `xml:"Header"` +} + +type SignedPartsHeader struct { + Text string `xml:",chardata"` + Name string `xml:"Name,attr"` + Namespace string `xml:"Namespace,attr"` +} + +type KerberosToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy KerberosTokenPolicy `xml:"Policy"` +} + +type KerberosTokenPolicy struct { + Text string `xml:",chardata"` + WSSGSSKerberosV5ApReqToken11 Text `xml:"WssGssKerberosV5ApReqToken11"` +} + +type IssuedToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + RequestSecurityTokenTemplate RequestSecurityTokenTemplate `xml:"RequestSecurityTokenTemplate"` + Policy IssuedTokenPolicy `xml:"Policy"` +} + +type RequestSecurityTokenTemplate struct { + Text string `xml:",chardata"` + KeyType Text `xml:"KeyType"` + EncryptWith Text `xml:"EncryptWith"` + SignatureAlgorithm Text `xml:"SignatureAlgorithm"` + CanonicalizationAlgorithm Text `xml:"CanonicalizationAlgorithm"` + EncryptionAlgorithm Text `xml:"EncryptionAlgorithm"` + KeySize Text `xml:"KeySize"` + KeyWrapAlgorithm Text `xml:"KeyWrapAlgorithm"` +} + +type IssuedTokenPolicy struct { + Text string `xml:",chardata"` + RequireInternalReference Text `xml:"RequireInternalReference"` +} + +type KeyValueToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` +} + +type WSS11 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy Wss11Policy `xml:"Policy"` +} + +type Wss11Policy struct { + Text string `xml:",chardata"` + MustSupportRefThumbprint Text `xml:"MustSupportRefThumbprint"` +} + +type Trust10 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy Trust10Policy `xml:"Policy"` +} + +type Trust10Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type SupportingTokensPolicy struct { + Text string `xml:",chardata"` + UsernameToken UsernameToken `xml:"UsernameToken"` +} +type UsernameToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy UsernameTokenPolicy `xml:"Policy"` +} + +type UsernameTokenPolicy struct { + Text string `xml:",chardata"` + WSSUsernameToken10 WSSUsernameToken10 `xml:"WssUsernameToken10"` +} + +type WSSUsernameToken10 struct { + Text string `xml:",chardata"` + XMLName xml.Name +} + +type WSTrust13 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy WSTrust13Policy `xml:"Policy"` +} + 
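These deeply nested policy types exist so encoding/xml can record which authentication styles a MEX policy advertises; the mex-document parsing code later in this diff checks whether a WssUsernameToken10 element was actually present by looking at its captured XMLName. A small sketch with local stand-in types and a trimmed-down, hypothetical policy document (not the vendored defs types themselves):

package main

import (
	"encoding/xml"
	"fmt"
)

// Local stand-ins for a slice of the vendored defs types, only to show how
// the presence of <WssUsernameToken10/> is detected after unmarshalling.
type policy struct {
	ID         string `xml:"Id,attr"`
	ExactlyOne struct {
		All struct {
			SignedEncryptedSupportingTokens struct {
				Policy struct {
					UsernameToken struct {
						Policy struct {
							WSSUsernameToken10 struct {
								XMLName xml.Name
							} `xml:"WssUsernameToken10"`
						} `xml:"Policy"`
					} `xml:"UsernameToken"`
				} `xml:"Policy"`
			} `xml:"SignedEncryptedSupportingTokens"`
		} `xml:"All"`
	} `xml:"ExactlyOne"`
}

func main() {
	const doc = `<Policy Id="UserNameWSTrustBinding_policy">
  <ExactlyOne><All>
    <SignedEncryptedSupportingTokens><Policy>
      <UsernameToken><Policy><WssUsernameToken10/></Policy></UsernameToken>
    </Policy></SignedEncryptedSupportingTokens>
  </All></ExactlyOne>
</Policy>`

	var p policy
	if err := xml.Unmarshal([]byte(doc), &p); err != nil {
		panic(err)
	}
	// A non-empty local name means the username/password token policy is present.
	fmt.Println(p.ExactlyOne.All.SignedEncryptedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local)
}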
+type WSTrust13Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedEncryptedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type Types struct { + Text string `xml:",chardata"` + Schema Schema `xml:"schema"` +} + +type Schema struct { + Text string `xml:",chardata"` + TargetNamespace string `xml:"targetNamespace,attr"` + Import []Import `xml:"import"` +} + +type Import struct { + Text string `xml:",chardata"` + SchemaLocation string `xml:"schemaLocation,attr"` + Namespace string `xml:"namespace,attr"` +} + +type Message struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Part Part `xml:"part"` +} + +type Part struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Element string `xml:"element,attr"` +} + +type PortType struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation Operation `xml:"operation"` +} + +type Operation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Input OperationIO `xml:"input"` + Output OperationIO `xml:"output"` +} + +type OperationIO struct { + Text string `xml:",chardata"` + Action string `xml:"Action,attr"` + Message string `xml:"message,attr"` + Body OperationIOBody `xml:"body"` +} + +type OperationIOBody struct { + Text string `xml:",chardata"` + Use string `xml:"use,attr"` +} + +type Binding struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + PolicyReference PolicyReference `xml:"PolicyReference"` + Binding DefinitionsBinding `xml:"binding"` + Operation BindingOperation `xml:"operation"` +} + +type PolicyReference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` +} + +type DefinitionsBinding struct { + Text string `xml:",chardata"` + Transport string `xml:"transport,attr"` +} + +type BindingOperation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation BindingOperationOperation `xml:"operation"` + Input BindingOperationIO `xml:"input"` + Output BindingOperationIO `xml:"output"` +} + +type BindingOperationOperation struct { + Text string `xml:",chardata"` + SoapAction string `xml:"soapAction,attr"` + Style string `xml:"style,attr"` +} + +type BindingOperationIO struct { + Text string `xml:",chardata"` + Body OperationIOBody `xml:"body"` +} + +type Service struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Port []Port `xml:"port"` +} + +type Port struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Binding string `xml:"binding,attr"` + Address Address `xml:"address"` + EndpointReference PortEndpointReference `xml:"EndpointReference"` +} + +type Address struct { + Text string `xml:",chardata"` + Location string `xml:"location,attr"` +} + +type PortEndpointReference struct { + Text string `xml:",chardata"` + Address Text `xml:"Address"` + Identity Identity `xml:"Identity"` +} + +type Identity struct { + Text string `xml:",chardata"` + XMLNS string `xml:"xmlns,attr"` + SPN Text `xml:"Spn"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go new file mode 100644 index 00000000000..7d072556577 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go @@ -0,0 +1,230 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import "encoding/xml" + +// TODO(msal): Someone (and it ain't gonna be me) needs to document these attributes or +// at the least put a link to RFC. + +type SAMLDefinitions struct { + XMLName xml.Name `xml:"Envelope"` + Text string `xml:",chardata"` + S string `xml:"s,attr"` + A string `xml:"a,attr"` + U string `xml:"u,attr"` + Header Header `xml:"Header"` + Body Body `xml:"Body"` +} + +type Header struct { + Text string `xml:",chardata"` + Action Action `xml:"Action"` + Security Security `xml:"Security"` +} + +type Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` +} + +type Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` + O string `xml:"o,attr"` + Timestamp Timestamp `xml:"Timestamp"` +} + +type Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + Created Text `xml:"Created"` + Expires Text `xml:"Expires"` +} + +type Text struct { + Text string `xml:",chardata"` +} + +type Body struct { + Text string `xml:",chardata"` + RequestSecurityTokenResponseCollection RequestSecurityTokenResponseCollection `xml:"RequestSecurityTokenResponseCollection"` +} + +type RequestSecurityTokenResponseCollection struct { + Text string `xml:",chardata"` + Trust string `xml:"trust,attr"` + RequestSecurityTokenResponse []RequestSecurityTokenResponse `xml:"RequestSecurityTokenResponse"` +} + +type RequestSecurityTokenResponse struct { + Text string `xml:",chardata"` + Lifetime Lifetime `xml:"Lifetime"` + AppliesTo AppliesTo `xml:"AppliesTo"` + RequestedSecurityToken RequestedSecurityToken `xml:"RequestedSecurityToken"` + RequestedAttachedReference RequestedAttachedReference `xml:"RequestedAttachedReference"` + RequestedUnattachedReference RequestedUnattachedReference `xml:"RequestedUnattachedReference"` + TokenType Text `xml:"TokenType"` + RequestType Text `xml:"RequestType"` + KeyType Text `xml:"KeyType"` +} + +type Lifetime struct { + Text string `xml:",chardata"` + Created WSUTimestamp `xml:"Created"` + Expires WSUTimestamp `xml:"Expires"` +} + +type WSUTimestamp struct { + Text string `xml:",chardata"` + Wsu string `xml:"wsu,attr"` +} + +type AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"wsp,attr"` + EndpointReference EndpointReference `xml:"EndpointReference"` +} + +type EndpointReference struct { + Text string `xml:",chardata"` + Wsa string `xml:"wsa,attr"` + Address Text `xml:"Address"` +} + +type RequestedSecurityToken struct { + Text string `xml:",chardata"` + AssertionRawXML string `xml:",innerxml"` + Assertion Assertion `xml:"Assertion"` +} + +type Assertion struct { + XMLName xml.Name // Normally its `xml:"Assertion"`, but I think they want to capture the xmlns + Text string `xml:",chardata"` + MajorVersion string `xml:"MajorVersion,attr"` + MinorVersion string `xml:"MinorVersion,attr"` + AssertionID string `xml:"AssertionID,attr"` + Issuer string `xml:"Issuer,attr"` + IssueInstant string `xml:"IssueInstant,attr"` + Saml string `xml:"saml,attr"` + Conditions Conditions `xml:"Conditions"` + 
AttributeStatement AttributeStatement `xml:"AttributeStatement"` + AuthenticationStatement AuthenticationStatement `xml:"AuthenticationStatement"` + Signature Signature `xml:"Signature"` +} + +type Conditions struct { + Text string `xml:",chardata"` + NotBefore string `xml:"NotBefore,attr"` + NotOnOrAfter string `xml:"NotOnOrAfter,attr"` + AudienceRestrictionCondition AudienceRestrictionCondition `xml:"AudienceRestrictionCondition"` +} + +type AudienceRestrictionCondition struct { + Text string `xml:",chardata"` + Audience Text `xml:"Audience"` +} + +type AttributeStatement struct { + Text string `xml:",chardata"` + Subject Subject `xml:"Subject"` + Attribute []Attribute `xml:"Attribute"` +} + +type Subject struct { + Text string `xml:",chardata"` + NameIdentifier NameIdentifier `xml:"NameIdentifier"` + SubjectConfirmation SubjectConfirmation `xml:"SubjectConfirmation"` +} + +type NameIdentifier struct { + Text string `xml:",chardata"` + Format string `xml:"Format,attr"` +} + +type SubjectConfirmation struct { + Text string `xml:",chardata"` + ConfirmationMethod Text `xml:"ConfirmationMethod"` +} + +type Attribute struct { + Text string `xml:",chardata"` + AttributeName string `xml:"AttributeName,attr"` + AttributeNamespace string `xml:"AttributeNamespace,attr"` + AttributeValue Text `xml:"AttributeValue"` +} + +type AuthenticationStatement struct { + Text string `xml:",chardata"` + AuthenticationMethod string `xml:"AuthenticationMethod,attr"` + AuthenticationInstant string `xml:"AuthenticationInstant,attr"` + Subject Subject `xml:"Subject"` +} + +type Signature struct { + Text string `xml:",chardata"` + Ds string `xml:"ds,attr"` + SignedInfo SignedInfo `xml:"SignedInfo"` + SignatureValue Text `xml:"SignatureValue"` + KeyInfo KeyInfo `xml:"KeyInfo"` +} + +type SignedInfo struct { + Text string `xml:",chardata"` + CanonicalizationMethod Method `xml:"CanonicalizationMethod"` + SignatureMethod Method `xml:"SignatureMethod"` + Reference Reference `xml:"Reference"` +} + +type Method struct { + Text string `xml:",chardata"` + Algorithm string `xml:"Algorithm,attr"` +} + +type Reference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` + Transforms Transforms `xml:"Transforms"` + DigestMethod Method `xml:"DigestMethod"` + DigestValue Text `xml:"DigestValue"` +} + +type Transforms struct { + Text string `xml:",chardata"` + Transform []Method `xml:"Transform"` +} + +type KeyInfo struct { + Text string `xml:",chardata"` + Xmlns string `xml:"xmlns,attr"` + X509Data X509Data `xml:"X509Data"` +} + +type X509Data struct { + Text string `xml:",chardata"` + X509Certificate Text `xml:"X509Certificate"` +} + +type RequestedAttachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} + +type SecurityTokenReference struct { + Text string `xml:",chardata"` + TokenType string `xml:"TokenType,attr"` + O string `xml:"o,attr"` + K string `xml:"k,attr"` + KeyIdentifier KeyIdentifier `xml:"KeyIdentifier"` +} + +type KeyIdentifier struct { + Text string `xml:",chardata"` + ValueType string `xml:"ValueType,attr"` +} + +type RequestedUnattachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go new file mode 100644 index 00000000000..6fe5efa8a9a --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=Version"; DO NOT EDIT. + +package defs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TrustUnknown-0] + _ = x[Trust2005-1] + _ = x[Trust13-2] +} + +const _Version_name = "TrustUnknownTrust2005Trust13" + +var _Version_index = [...]uint8{0, 12, 21, 28} + +func (i Version) String() string { + if i < 0 || i >= Version(len(_Version_index)-1) { + return "Version(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Version_name[_Version_index[i]:_Version_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go new file mode 100644 index 00000000000..8fad5efb5de --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go @@ -0,0 +1,199 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import ( + "encoding/xml" + "fmt" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + uuid "github.com/google/uuid" +) + +//go:generate stringer -type=Version + +type Version int + +const ( + TrustUnknown Version = iota + Trust2005 + Trust13 +) + +// Endpoint represents a WSTrust endpoint. +type Endpoint struct { + // Version is the version of the endpoint. + Version Version + // URL is the URL of the endpoint. 
+ URL string +} + +type wsTrustTokenRequestEnvelope struct { + XMLName xml.Name `xml:"s:Envelope"` + Text string `xml:",chardata"` + S string `xml:"xmlns:s,attr"` + Wsa string `xml:"xmlns:wsa,attr"` + Wsu string `xml:"xmlns:wsu,attr"` + Header struct { + Text string `xml:",chardata"` + Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:Action"` + MessageID struct { + Text string `xml:",chardata"` + } `xml:"wsa:messageID"` + ReplyTo struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:ReplyTo"` + To struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:To"` + Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + Wsse string `xml:"xmlns:wsse,attr"` + Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Created struct { + Text string `xml:",chardata"` + } `xml:"wsu:Created"` + Expires struct { + Text string `xml:",chardata"` + } `xml:"wsu:Expires"` + } `xml:"wsu:Timestamp"` + UsernameToken struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Username struct { + Text string `xml:",chardata"` + } `xml:"wsse:Username"` + Password struct { + Text string `xml:",chardata"` + } `xml:"wsse:Password"` + } `xml:"wsse:UsernameToken"` + } `xml:"wsse:Security"` + } `xml:"s:Header"` + Body struct { + Text string `xml:",chardata"` + RequestSecurityToken struct { + Text string `xml:",chardata"` + Wst string `xml:"xmlns:wst,attr"` + AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"xmlns:wsp,attr"` + EndpointReference struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:EndpointReference"` + } `xml:"wsp:AppliesTo"` + KeyType struct { + Text string `xml:",chardata"` + } `xml:"wst:KeyType"` + RequestType struct { + Text string `xml:",chardata"` + } `xml:"wst:RequestType"` + } `xml:"wst:RequestSecurityToken"` + } `xml:"s:Body"` +} + +func buildTimeString(t time.Time) string { + // Golang time formats are weird: https://stackoverflow.com/questions/20234104/how-to-format-current-time-using-a-yyyymmddhhmmss-format + return t.Format("2006-01-02T15:04:05.000Z") +} + +func (wte *Endpoint) buildTokenRequestMessage(authType authority.AuthorizeType, cloudAudienceURN string, username string, password string) (string, error) { + var soapAction string + var trustNamespace string + var keyType string + var requestType string + + createdTime := time.Now().UTC() + expiresTime := createdTime.Add(10 * time.Minute) + + switch wte.Version { + case Trust2005: + soapAction = trust2005Spec + trustNamespace = "http://schemas.xmlsoap.org/ws/2005/02/trust" + keyType = "http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey" + requestType = "http://schemas.xmlsoap.org/ws/2005/02/trust/Issue" + case Trust13: + soapAction = trust13Spec + trustNamespace = "http://docs.oasis-open.org/ws-sx/ws-trust/200512" + keyType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Bearer" + requestType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Issue" + default: + return "", fmt.Errorf("buildTokenRequestMessage had Version == %q, which is not recognized", wte.Version) + } + + var envelope wsTrustTokenRequestEnvelope + + messageUUID := uuid.New() + + envelope.S = "http://www.w3.org/2003/05/soap-envelope" + envelope.Wsa = 
"http://www.w3.org/2005/08/addressing" + envelope.Wsu = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd" + + envelope.Header.Action.MustUnderstand = "1" + envelope.Header.Action.Text = soapAction + envelope.Header.MessageID.Text = "urn:uuid:" + messageUUID.String() + envelope.Header.ReplyTo.Address.Text = "http://www.w3.org/2005/08/addressing/anonymous" + envelope.Header.To.MustUnderstand = "1" + envelope.Header.To.Text = wte.URL + + switch authType { + case authority.ATUnknown: + return "", fmt.Errorf("buildTokenRequestMessage had no authority type(%v)", authType) + case authority.ATUsernamePassword: + endpointUUID := uuid.New() + + var trustID string + if wte.Version == Trust2005 { + trustID = "UnPwSecTok2005-" + endpointUUID.String() + } else { + trustID = "UnPwSecTok13-" + endpointUUID.String() + } + + envelope.Header.Security.MustUnderstand = "1" + envelope.Header.Security.Wsse = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" + envelope.Header.Security.Timestamp.ID = "MSATimeStamp" + envelope.Header.Security.Timestamp.Created.Text = buildTimeString(createdTime) + envelope.Header.Security.Timestamp.Expires.Text = buildTimeString(expiresTime) + envelope.Header.Security.UsernameToken.ID = trustID + envelope.Header.Security.UsernameToken.Username.Text = username + envelope.Header.Security.UsernameToken.Password.Text = password + default: + // This is just to note that we don't do anything for other cases. + // We aren't missing anything I know of. + } + + envelope.Body.RequestSecurityToken.Wst = trustNamespace + envelope.Body.RequestSecurityToken.AppliesTo.Wsp = "http://schemas.xmlsoap.org/ws/2004/09/policy" + envelope.Body.RequestSecurityToken.AppliesTo.EndpointReference.Address.Text = cloudAudienceURN + envelope.Body.RequestSecurityToken.KeyType.Text = keyType + envelope.Body.RequestSecurityToken.RequestType.Text = requestType + + output, err := xml.Marshal(envelope) + if err != nil { + return "", err + } + + return string(output), nil +} + +func (wte *Endpoint) BuildTokenRequestMessageWIA(cloudAudienceURN string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATWindowsIntegrated, cloudAudienceURN, "", "") +} + +func (wte *Endpoint) BuildTokenRequestMessageUsernamePassword(cloudAudienceURN string, username string, password string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATUsernamePassword, cloudAudienceURN, username, password) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go new file mode 100644 index 00000000000..e3d19886ebc --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go @@ -0,0 +1,159 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package defs + +import ( + "errors" + "fmt" + "strings" +) + +//go:generate stringer -type=endpointType + +type endpointType int + +const ( + etUnknown endpointType = iota + etUsernamePassword + etWindowsTransport +) + +type wsEndpointData struct { + Version Version + EndpointType endpointType +} + +const trust13Spec string = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" +const trust2005Spec string = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue" + +type MexDocument struct { + UsernamePasswordEndpoint Endpoint + WindowsTransportEndpoint Endpoint + policies map[string]endpointType + bindings map[string]wsEndpointData +} + +func updateEndpoint(cached *Endpoint, found Endpoint) { + if cached == nil || cached.Version == TrustUnknown { + *cached = found + return + } + if (*cached).Version == Trust2005 && found.Version == Trust13 { + *cached = found + return + } +} + +// TODO(msal): Someone needs to write tests for everything below. + +// NewFromDef creates a new MexDocument. +func NewFromDef(defs Definitions) (MexDocument, error) { + policies, err := policies(defs) + if err != nil { + return MexDocument{}, err + } + + bindings, err := bindings(defs, policies) + if err != nil { + return MexDocument{}, err + } + + userPass, windows, err := endpoints(defs, bindings) + if err != nil { + return MexDocument{}, err + } + + return MexDocument{ + UsernamePasswordEndpoint: userPass, + WindowsTransportEndpoint: windows, + policies: policies, + bindings: bindings, + }, nil +} + +func policies(defs Definitions) (map[string]endpointType, error) { + policies := make(map[string]endpointType, len(defs.Policy)) + + for _, policy := range defs.Policy { + if policy.ExactlyOne.All.NegotiateAuthentication.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etWindowsTransport + } + } + + if policy.ExactlyOne.All.SignedEncryptedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + if policy.ExactlyOne.All.SignedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + } + + if len(policies) == 0 { + return policies, errors.New("no policies for mex document") + } + + return policies, nil +} + +func bindings(defs Definitions, policies map[string]endpointType) (map[string]wsEndpointData, error) { + bindings := make(map[string]wsEndpointData, len(defs.Binding)) + + for _, binding := range defs.Binding { + policyName := binding.PolicyReference.URI + transport := binding.Binding.Transport + + if transport == "http://schemas.xmlsoap.org/soap/http" { + if policy, ok := policies[policyName]; ok { + bindingName := binding.Name + specVersion := binding.Operation.Operation.SoapAction + + if specVersion == trust13Spec { + bindings[bindingName] = wsEndpointData{Trust13, policy} + } else if specVersion == trust2005Spec { + bindings[bindingName] = wsEndpointData{Trust2005, policy} + } else { + return nil, errors.New("found unknown spec version in mex document") + } + } + } + } + return bindings, nil +} + +func endpoints(defs Definitions, bindings map[string]wsEndpointData) (userPass, windows Endpoint, err error) { + for _, port := range defs.Service.Port { + bindingName := port.Binding + + index := 
strings.Index(bindingName, ":") + if index != -1 { + bindingName = bindingName[index+1:] + } + + if binding, ok := bindings[bindingName]; ok { + url := strings.TrimSpace(port.EndpointReference.Address.Text) + if url == "" { + return Endpoint{}, Endpoint{}, fmt.Errorf("MexDocument cannot have blank URL endpoint") + } + if binding.Version == TrustUnknown { + return Endpoint{}, Endpoint{}, fmt.Errorf("endpoint version unknown") + } + endpoint := Endpoint{Version: binding.Version, URL: url} + + switch binding.EndpointType { + case etUsernamePassword: + updateEndpoint(&userPass, endpoint) + case etWindowsTransport: + updateEndpoint(&windows, endpoint) + default: + return Endpoint{}, Endpoint{}, errors.New("found unknown port type in MEX document") + } + } + } + return userPass, windows, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go new file mode 100644 index 00000000000..47cd4c692d6 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package wstrust provides a client for communicating with a WSTrust (https://en.wikipedia.org/wiki/WS-Trust#:~:text=WS%2DTrust%20is%20a%20WS,in%20a%20secure%20message%20exchange.) +for the purposes of extracting metadata from the service. This data can be used to acquire +tokens using the accesstokens.Client.GetAccessTokenFromSamlGrant() call. +*/ +package wstrust + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" +) + +type xmlCaller interface { + XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error + SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error +} + +type SamlTokenInfo struct { + AssertionType string // Should be either constants SAMLV1Grant or SAMLV2Grant. + Assertion string +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm xmlCaller +} + +// TODO(msal): This allows me to call Mex without having a real Def file on line 45. +// This would fail because policies() would not find a policy. This is easy enough to +// fix in test data, but.... Definitions is defined with built in structs. That needs +// to be pulled apart and until then I have this hack in. +var newFromDef = defs.NewFromDef + +// Mex provides metadata about a wstrust service. +func (c Client) Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) { + resp := defs.Definitions{} + err := c.Comm.XMLCall( + ctx, + federationMetadataURL, + http.Header{}, + nil, + &resp, + ) + if err != nil { + return defs.MexDocument{}, err + } + + return newFromDef(resp) +} + +const ( + SoapActionDefault = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" + + // Note: Commented out because this action is not supported. 
It was in the original code + // but only used in a switch where it errored. Since there was only one value, a default + // worked better. However, buildTokenRequestMessage() had 2005 support. I'm not actually + // sure what's going on here. It like we have half support. For now this is here just + // for documentation purposes in case we are going to add support. + // + // SoapActionWSTrust2005 = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue" +) + +// SAMLTokenInfo provides SAML information that is used to generate a SAML token. +func (c Client) SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (SamlTokenInfo, error) { + var wsTrustRequestMessage string + var err error + + switch authParameters.AuthorizationType { + case authority.ATWindowsIntegrated: + wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageWIA(cloudAudienceURN) + if err != nil { + return SamlTokenInfo{}, err + } + case authority.ATUsernamePassword: + wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageUsernamePassword( + cloudAudienceURN, authParameters.Username, authParameters.Password) + if err != nil { + return SamlTokenInfo{}, err + } + default: + return SamlTokenInfo{}, fmt.Errorf("unknown auth type %v", authParameters.AuthorizationType) + } + + var soapAction string + switch endpoint.Version { + case defs.Trust13: + soapAction = SoapActionDefault + case defs.Trust2005: + return SamlTokenInfo{}, errors.New("WS Trust 2005 support is not implemented") + default: + return SamlTokenInfo{}, fmt.Errorf("the SOAP endpoint for a wstrust call had an invalid version: %v", endpoint.Version) + } + + resp := defs.SAMLDefinitions{} + err = c.Comm.SOAPCall(ctx, endpoint.URL, soapAction, http.Header{}, nil, wsTrustRequestMessage, &resp) + if err != nil { + return SamlTokenInfo{}, err + } + + return c.samlAssertion(resp) +} + +const ( + samlv1Assertion = "urn:oasis:names:tc:SAML:1.0:assertion" + samlv2Assertion = "urn:oasis:names:tc:SAML:2.0:assertion" +) + +func (c Client) samlAssertion(def defs.SAMLDefinitions) (SamlTokenInfo, error) { + for _, tokenResponse := range def.Body.RequestSecurityTokenResponseCollection.RequestSecurityTokenResponse { + token := tokenResponse.RequestedSecurityToken + if token.Assertion.XMLName.Local != "" { + assertion := token.AssertionRawXML + + samlVersion := token.Assertion.Saml + switch samlVersion { + case samlv1Assertion: + return SamlTokenInfo{AssertionType: grant.SAMLV1, Assertion: assertion}, nil + case samlv2Assertion: + return SamlTokenInfo{AssertionType: grant.SAMLV2, Assertion: assertion}, nil + } + return SamlTokenInfo{}, fmt.Errorf("couldn't parse SAML assertion, version unknown: %q", samlVersion) + } + } + return SamlTokenInfo{}, errors.New("unknown WS-Trust version") +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go new file mode 100644 index 00000000000..0ade411797a --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// TODO(msal): Write some tests. The original code this came from didn't have tests and I'm too +// tired at this point to do it. It, like many other *Manager code I found was broken because +// they didn't have mutex protection. 
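The samlAssertion method above reads the saml namespace attribute on the returned assertion to pick the OAuth grant under which the token is later redeemed; the grant URNs come from the grant package earlier in this diff. A sketch of that mapping in isolation (constants copied from the vendored sources; the function name is illustrative only):

package main

import (
	"errors"
	"fmt"
)

// Grant-type URNs mirrored from grant.go above.
const (
	grantSAMLV1 = "urn:ietf:params:oauth:grant-type:saml1_1-bearer"
	grantSAMLV2 = "urn:ietf:params:oauth:grant-type:saml2-bearer"
)

// assertionGrant mirrors the switch in samlAssertion: the assertion's SAML
// namespace decides which bearer grant the token exchange will use.
func assertionGrant(samlNamespace string) (string, error) {
	switch samlNamespace {
	case "urn:oasis:names:tc:SAML:1.0:assertion":
		return grantSAMLV1, nil
	case "urn:oasis:names:tc:SAML:2.0:assertion":
		return grantSAMLV2, nil
	}
	return "", errors.New("unknown SAML assertion version")
}

func main() {
	g, _ := assertionGrant("urn:oasis:names:tc:SAML:2.0:assertion")
	fmt.Println(g)
}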
+ +package oauth + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" +) + +// ADFS is an active directory federation service authority type. +const ADFS = "ADFS" + +type cacheEntry struct { + Endpoints authority.Endpoints + ValidForDomainsInList map[string]bool +} + +func createcacheEntry(endpoints authority.Endpoints) cacheEntry { + return cacheEntry{endpoints, map[string]bool{}} +} + +// AuthorityEndpoint retrieves endpoints from an authority for auth and token acquisition. +type authorityEndpoint struct { + rest *ops.REST + + mu sync.Mutex + cache map[string]cacheEntry +} + +// newAuthorityEndpoint is the constructor for AuthorityEndpoint. +func newAuthorityEndpoint(rest *ops.REST) *authorityEndpoint { + m := &authorityEndpoint{rest: rest, cache: map[string]cacheEntry{}} + return m +} + +// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance +func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) { + + if endpoints, found := m.cachedEndpoints(authorityInfo, userPrincipalName); found { + return endpoints, nil + } + + endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName) + if err != nil { + return authority.Endpoints{}, err + } + + resp, err := m.rest.Authority().GetTenantDiscoveryResponse(ctx, endpoint) + if err != nil { + return authority.Endpoints{}, err + } + if err := resp.Validate(); err != nil { + return authority.Endpoints{}, fmt.Errorf("ResolveEndpoints(): %w", err) + } + + tenant := authorityInfo.Tenant + + endpoints := authority.NewEndpoints( + strings.Replace(resp.AuthorizationEndpoint, "{tenant}", tenant, -1), + strings.Replace(resp.TokenEndpoint, "{tenant}", tenant, -1), + strings.Replace(resp.Issuer, "{tenant}", tenant, -1), + authorityInfo.Host) + + m.addCachedEndpoints(authorityInfo, userPrincipalName, endpoints) + + return endpoints, nil +} + +// cachedEndpoints returns a the cached endpoints if they exists. If not, we return false. +func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, bool) { + m.mu.Lock() + defer m.mu.Unlock() + + if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { + if authorityInfo.AuthorityType == ADFS { + domain, err := adfsDomainFromUpn(userPrincipalName) + if err == nil { + if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok { + return cacheEntry.Endpoints, true + } + } + } + return cacheEntry.Endpoints, true + } + return authority.Endpoints{}, false +} + +func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, userPrincipalName string, endpoints authority.Endpoints) { + m.mu.Lock() + defer m.mu.Unlock() + + updatedCacheEntry := createcacheEntry(endpoints) + + if authorityInfo.AuthorityType == ADFS { + // Since we're here, we've made a call to the backend. We want to ensure we're caching + // the latest values from the server. 
+ if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { + for k := range cacheEntry.ValidForDomainsInList { + updatedCacheEntry.ValidForDomainsInList[k] = true + } + } + domain, err := adfsDomainFromUpn(userPrincipalName) + if err == nil { + updatedCacheEntry.ValidForDomainsInList[domain] = true + } + } + + m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry +} + +func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) { + if authorityInfo.Tenant == "adfs" { + return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil + } else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + } else if authorityInfo.Region != "" { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + + } + + return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil +} + +func adfsDomainFromUpn(userPrincipalName string) (string, error) { + parts := strings.Split(userPrincipalName, "@") + if len(parts) < 2 { + return "", errors.New("no @ present in user principal name") + } + return parts[1], nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go new file mode 100644 index 00000000000..4561d72db4d --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go @@ -0,0 +1,52 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package options + +import ( + "errors" + "fmt" +) + +// CallOption implements an optional argument to a method call. See +// https://blog.devgenius.io/go-call-option-that-can-be-used-with-multiple-methods-6c81734f3dbe +// for an explanation of the usage pattern. +type CallOption interface { + Do(any) error + callOption() +} + +// ApplyOptions applies all the callOptions to options. options must be a pointer to a struct and +// callOptions must be a list of objects that implement CallOption. +func ApplyOptions[O, C any](options O, callOptions []C) error { + for _, o := range callOptions { + if t, ok := any(o).(CallOption); !ok { + return fmt.Errorf("unexpected option type %T", o) + } else if err := t.Do(options); err != nil { + return err + } + } + return nil +} + +// NewCallOption returns a new CallOption whose Do() method calls function "f". +func NewCallOption(f func(any) error) CallOption { + if f == nil { + // This isn't a practical concern because only an MSAL maintainer can get + // us here, by implementing a do-nothing option. But if someone does that, + // the below ensures the method invoked with the option returns an error. 
+ return callOption(func(any) error { + return errors.New("invalid option: missing implementation") + }) + } + return callOption(f) +} + +// callOption is an adapter for a function to a CallOption +type callOption func(any) error + +func (c callOption) Do(a any) error { + return c(a) +} + +func (callOption) callOption() {} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go new file mode 100644 index 00000000000..f7e12a71bf3 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go @@ -0,0 +1,71 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package shared + +import ( + "net/http" + "reflect" + "strings" +) + +const ( + // CacheKeySeparator is used in creating the keys of the cache. + CacheKeySeparator = "-" +) + +type Account struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + LocalAccountID string `json:"local_account_id,omitempty"` + AuthorityType string `json:"authority_type,omitempty"` + PreferredUsername string `json:"username,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + MiddleName string `json:"middle_name,omitempty"` + Name string `json:"name,omitempty"` + AlternativeID string `json:"alternative_account_id,omitempty"` + RawClientInfo string `json:"client_info,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccount creates an account. +func NewAccount(homeAccountID, env, realm, localAccountID, authorityType, username string) Account { + return Account{ + HomeAccountID: homeAccountID, + Environment: env, + Realm: realm, + LocalAccountID: localAccountID, + AuthorityType: authorityType, + PreferredUsername: username, + } +} + +// Key creates the key for storing accounts in the cache. +func (acc Account) Key() string { + return strings.Join([]string{acc.HomeAccountID, acc.Environment, acc.Realm}, CacheKeySeparator) +} + +// IsZero checks the zero value of account. +func (acc Account) IsZero() bool { + v := reflect.ValueOf(acc) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// DefaultClient is our default shared HTTP client. +var DefaultClient = &http.Client{} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go new file mode 100644 index 00000000000..b76c0c56962 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go @@ -0,0 +1,8 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package version keeps the version number of the client package. +package version + +// Version is the version of this client package that is communicated to the server. 
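The options.CallOption/ApplyOptions machinery above boils down to functional options carried as func(any) error values that type-switch on the concrete options struct they receive. A toy sketch of that shape, with entirely hypothetical names, to show why a single option value can service several methods:

package main

import (
	"errors"
	"fmt"
)

// callOpt is a miniature of options.CallOption: a function that mutates
// whatever options struct it is handed.
type callOpt func(any) error

type greetOptions struct{ name string }

// withName works only against *greetOptions; other targets return an error,
// just as the vendored options reject unexpected types.
func withName(name string) callOpt {
	return func(a any) error {
		o, ok := a.(*greetOptions)
		if !ok {
			return errors.New("unexpected options type")
		}
		o.name = name
		return nil
	}
}

func greet(opts ...callOpt) (string, error) {
	o := greetOptions{name: "world"}
	for _, opt := range opts {
		if err := opt(&o); err != nil {
			return "", err
		}
	}
	return "hello, " + o.name, nil
}

func main() {
	msg, _ := greet(withName("msal"))
	fmt.Println(msg)
}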
+const Version = "1.0.0" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go new file mode 100644 index 00000000000..cce05277e80 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -0,0 +1,683 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package public provides a client for authentication of "public" applications. A "public" +application is defined as an app that runs on client devices (android, ios, windows, linux, ...). +These devices are "untrusted" and access resources via web APIs that must authenticate. +*/ +package public + +/* +Design note: + +public.Client uses client.Base as an embedded type. client.Base statically assigns its attributes +during creation. As it doesn't have any pointers in it, anything borrowed from it, such as +Base.AuthParams is a copy that is free to be manipulated here. +*/ + +// TODO(msal): This should have example code for each method on client using Go's example doc framework. +// base usage details should be includee in the package documentation. + +import ( + "context" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/url" + "strconv" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" + "github.com/google/uuid" + "github.com/pkg/browser" +) + +// AuthResult contains the results of one token acquisition operation. +// For details see https://aka.ms/msal-net-authenticationresult +type AuthResult = base.AuthResult + +type Account = shared.Account + +// clientOptions configures the Client's behavior. +type clientOptions struct { + accessor cache.ExportReplace + authority string + capabilities []string + disableInstanceDiscovery bool + httpClient ops.HTTPClient +} + +func (p *clientOptions) validate() error { + u, err := url.Parse(p.authority) + if err != nil { + return fmt.Errorf("Authority options cannot be URL parsed: %w", err) + } + if u.Scheme != "https" { + return fmt.Errorf("Authority(%s) did not start with https://", u.String()) + } + return nil +} + +// Option is an optional argument to the New constructor. +type Option func(o *clientOptions) + +// WithAuthority allows for a custom authority to be set. This must be a valid https url. +func WithAuthority(authority string) Option { + return func(o *clientOptions) { + o.authority = authority + } +} + +// WithCache provides an accessor that will read and write authentication data to an externally managed cache. 
+func WithCache(accessor cache.ExportReplace) Option { + return func(o *clientOptions) { + o.accessor = accessor + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(o *clientOptions) { + // there's no danger of sharing the slice's underlying memory with the application because + // this slice is simply passed to base.WithClientCapabilities, which copies its data + o.capabilities = capabilities + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) Option { + return func(o *clientOptions) { + o.httpClient = httpClient + } +} + +// WithInstanceDiscovery set to false to disable authority validation (to support private cloud scenarios) +func WithInstanceDiscovery(enabled bool) Option { + return func(o *clientOptions) { + o.disableInstanceDiscovery = !enabled + } +} + +// Client is a representation of authentication client for public applications as defined in the +// package doc. For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications. +type Client struct { + base base.Client +} + +// New is the constructor for Client. +func New(clientID string, options ...Option) (Client, error) { + opts := clientOptions{ + authority: base.AuthorityPublicCloud, + httpClient: shared.DefaultClient, + } + + for _, o := range options { + o(&opts) + } + if err := opts.validate(); err != nil { + return Client{}, err + } + + base, err := base.New(clientID, opts.authority, oauth.New(opts.httpClient), base.WithCacheAccessor(opts.accessor), base.WithClientCapabilities(opts.capabilities), base.WithInstanceDiscovery(!opts.disableInstanceDiscovery)) + if err != nil { + return Client{}, err + } + return Client{base}, nil +} + +// authCodeURLOptions contains options for AuthCodeURL +type authCodeURLOptions struct { + claims, loginHint, tenantID, domainHint string +} + +// AuthCodeURLOption is implemented by options for AuthCodeURL +type AuthCodeURLOption interface { + authCodeURLOption() +} + +// AuthCodeURL creates a URL used to acquire an authorization code. +// +// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID] +func (pca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) { + o := authCodeURLOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return "", err + } + ap, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return "", err + } + ap.Claims = o.claims + ap.LoginHint = o.loginHint + ap.DomainHint = o.domainHint + return pca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap) +} + +// WithClaims sets additional claims to request for the token, such as those required by conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded. +// This option is valid for any token acquisition method. 
+func WithClaims(claims string) interface { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.claims = claims + case *acquireTokenByDeviceCodeOptions: + t.claims = claims + case *acquireTokenByUsernamePasswordOptions: + t.claims = claims + case *acquireTokenSilentOptions: + t.claims = claims + case *authCodeURLOptions: + t.claims = claims + case *interactiveAuthOptions: + t.claims = claims + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New] by [WithAuthority]. +// This option is valid for any token acquisition method. +func WithTenantID(tenantID string) interface { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.tenantID = tenantID + case *acquireTokenByDeviceCodeOptions: + t.tenantID = tenantID + case *acquireTokenByUsernamePasswordOptions: + t.tenantID = tenantID + case *acquireTokenSilentOptions: + t.tenantID = tenantID + case *authCodeURLOptions: + t.tenantID = tenantID + case *interactiveAuthOptions: + t.tenantID = tenantID + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// acquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. +// These are set by using various AcquireTokenSilentOption functions. +type acquireTokenSilentOptions struct { + account Account + claims, tenantID string +} + +// AcquireSilentOption is implemented by options for AcquireTokenSilent +type AcquireSilentOption interface { + acquireSilentOption() +} + +// WithSilentAccount uses the passed account during an AcquireTokenSilent() call. +func WithSilentAccount(account Account) interface { + AcquireSilentOption + options.CallOption +} { + return struct { + AcquireSilentOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.account = account + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenSilent acquires a token from either the cache or using a refresh token. 
+// +// Options: [WithClaims], [WithSilentAccount], [WithTenantID] +func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) { + o := acquireTokenSilentOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + silentParameters := base.AcquireTokenSilentParameters{ + Scopes: scopes, + Account: o.account, + Claims: o.claims, + RequestType: accesstokens.ATPublic, + IsAppCache: false, + TenantID: o.tenantID, + } + + return pca.base.AcquireTokenSilent(ctx, silentParameters) +} + +// acquireTokenByUsernamePasswordOptions contains optional configuration for AcquireTokenByUsernamePassword +type acquireTokenByUsernamePasswordOptions struct { + claims, tenantID string +} + +// AcquireByUsernamePasswordOption is implemented by options for AcquireTokenByUsernamePassword +type AcquireByUsernamePasswordOption interface { + acquireByUsernamePasswordOption() +} + +// AcquireTokenByUsernamePassword acquires a security token from the authority, via Username/Password Authentication. +// NOTE: this flow is NOT recommended. +// +// Options: [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username, password string, opts ...AcquireByUsernamePasswordOption) (AuthResult, error) { + o := acquireTokenByUsernamePasswordOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATUsernamePassword + authParams.Claims = o.claims + authParams.Username = username + authParams.Password = password + + token, err := pca.base.Token.UsernamePassword(ctx, authParams) + if err != nil { + return AuthResult{}, err + } + return pca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +type DeviceCodeResult = accesstokens.DeviceCodeResult + +// DeviceCode provides the results of the device code flows first stage (containing the code) +// that must be entered on the second device and provides a method to retrieve the AuthenticationResult +// once that code has been entered and verified. +type DeviceCode struct { + // Result holds the information about the device code (such as the code). + Result DeviceCodeResult + + authParams authority.AuthParams + client Client + dc oauth.DeviceCode +} + +// AuthenticationResult retreives the AuthenticationResult once the user enters the code +// on the second device. Until then it blocks until the .AcquireTokenByDeviceCode() context +// is cancelled or the token expires. +func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error) { + token, err := d.dc.Token(ctx) + if err != nil { + return AuthResult{}, err + } + return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true) +} + +// acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode +type acquireTokenByDeviceCodeOptions struct { + claims, tenantID string +} + +// AcquireByDeviceCodeOption is implemented by options for AcquireTokenByDeviceCode +type AcquireByDeviceCodeOption interface { + acquireByDeviceCodeOptions() +} + +// AcquireTokenByDeviceCode acquires a security token from the authority, by acquiring a device code and using that to acquire the token. +// Users need to create an AcquireTokenDeviceCodeParameters instance and pass it in. 
+// +// Options: [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByDeviceCode(ctx context.Context, scopes []string, opts ...AcquireByDeviceCodeOption) (DeviceCode, error) { + o := acquireTokenByDeviceCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return DeviceCode{}, err + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return DeviceCode{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATDeviceCode + authParams.Claims = o.claims + + dc, err := pca.base.Token.DeviceCode(ctx, authParams) + if err != nil { + return DeviceCode{}, err + } + + return DeviceCode{Result: dc.Result, authParams: authParams, client: pca, dc: dc}, nil +} + +// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow. +type acquireTokenByAuthCodeOptions struct { + challenge, claims, tenantID string +} + +// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode +type AcquireByAuthCodeOption interface { + acquireByAuthCodeOption() +} + +// WithChallenge allows you to provide a code for the .AcquireTokenByAuthCode() call. +func WithChallenge(challenge string) interface { + AcquireByAuthCodeOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.challenge = challenge + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code. +// The specified redirect URI must be the same URI that was used when the authorization code was requested. +// +// Options: [WithChallenge], [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) { + o := acquireTokenByAuthCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + params := base.AcquireTokenAuthCodeParameters{ + Scopes: scopes, + Code: code, + Challenge: o.challenge, + Claims: o.claims, + AppType: accesstokens.ATPublic, + RedirectURI: redirectURI, + TenantID: o.tenantID, + } + + return pca.base.AcquireTokenByAuthCode(ctx, params) +} + +// Accounts gets all the accounts in the token cache. +// If there are no accounts in the cache the returned slice is empty. +func (pca Client) Accounts(ctx context.Context) ([]Account, error) { + return pca.base.AllAccounts(ctx) +} + +// RemoveAccount signs the account out and forgets account from token cache. +func (pca Client) RemoveAccount(ctx context.Context, account Account) error { + return pca.base.RemoveAccount(ctx, account) +} + +// interactiveAuthOptions contains the optional parameters used to acquire an access token for interactive auth code flow. +type interactiveAuthOptions struct { + claims, domainHint, loginHint, redirectURI, tenantID string +} + +// AcquireInteractiveOption is implemented by options for AcquireTokenInteractive +type AcquireInteractiveOption interface { + acquireInteractiveOption() +} + +// WithLoginHint pre-populates the login prompt with a username. 
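// Illustrative sketch (not part of the vendored source): the device code flow
// described above happens in two steps. The Message field on DeviceCodeResult
// is assumed here as the user-facing instruction string; see the accesstokens
// package for the full set of fields.
//
//	dc, err := client.AcquireTokenByDeviceCode(ctx, scopes)
//	if err != nil {
//		return err
//	}
//	fmt.Println(dc.Result.Message) // tell the user where to enter the code
//	// Blocks until the user finishes signing in on the second device,
//	// the device code expires, or ctx is cancelled.
//	res, err := dc.AuthenticationResult(ctx)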
+func WithLoginHint(username string) interface { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.loginHint = username + case *interactiveAuthOptions: + t.loginHint = username + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url. +func WithDomainHint(domain string) interface { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.domainHint = domain + case *interactiveAuthOptions: + t.domainHint = domain + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithRedirectURI sets a port for the local server used in interactive authentication, for +// example http://localhost:port. All URI components other than the port are ignored. +func WithRedirectURI(redirectURI string) interface { + AcquireInteractiveOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *interactiveAuthOptions: + t.redirectURI = redirectURI + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenInteractive acquires a security token from the authority using the default web browser to select the account. +// https://docs.microsoft.com/en-us/azure/active-directory/develop/msal-authentication-flows#interactive-and-non-interactive-authentication +// +// Options: [WithDomainHint], [WithLoginHint], [WithRedirectURI], [WithTenantID] +func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, opts ...AcquireInteractiveOption) (AuthResult, error) { + o := interactiveAuthOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + // the code verifier is a random 32-byte sequence that's been base-64 encoded without padding. 
+ // it's used to prevent MitM attacks during auth code flow, see https://tools.ietf.org/html/rfc7636 + cv, challenge, err := codeVerifier() + if err != nil { + return AuthResult{}, err + } + var redirectURL *url.URL + if o.redirectURI != "" { + redirectURL, err = url.Parse(o.redirectURI) + if err != nil { + return AuthResult{}, err + } + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATInteractive + authParams.Claims = o.claims + authParams.CodeChallenge = challenge + authParams.CodeChallengeMethod = "S256" + authParams.LoginHint = o.loginHint + authParams.DomainHint = o.domainHint + authParams.State = uuid.New().String() + authParams.Prompt = "select_account" + res, err := pca.browserLogin(ctx, redirectURL, authParams) + if err != nil { + return AuthResult{}, err + } + authParams.Redirecturi = res.redirectURI + + req, err := accesstokens.NewCodeChallengeRequest(authParams, accesstokens.ATPublic, nil, res.authCode, cv) + if err != nil { + return AuthResult{}, err + } + + token, err := pca.base.Token.AuthCode(ctx, req) + if err != nil { + return AuthResult{}, err + } + + return pca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +type interactiveAuthResult struct { + authCode string + redirectURI string +} + +// provides a test hook to simulate opening a browser +var browserOpenURL = func(authURL string) error { + return browser.OpenURL(authURL) +} + +// parses the port number from the provided URL. +// returns 0 if nil or no port is specified. +func parsePort(u *url.URL) (int, error) { + if u == nil { + return 0, nil + } + p := u.Port() + if p == "" { + return 0, nil + } + return strconv.Atoi(p) +} + +// browserLogin launches the system browser for interactive login +func (pca Client) browserLogin(ctx context.Context, redirectURI *url.URL, params authority.AuthParams) (interactiveAuthResult, error) { + // start local redirect server so login can call us back + port, err := parsePort(redirectURI) + if err != nil { + return interactiveAuthResult{}, err + } + srv, err := local.New(params.State, port) + if err != nil { + return interactiveAuthResult{}, err + } + defer srv.Shutdown() + params.Scopes = accesstokens.AppendDefaultScopes(params) + authURL, err := pca.base.AuthCodeURL(ctx, params.ClientID, srv.Addr, params.Scopes, params) + if err != nil { + return interactiveAuthResult{}, err + } + // open browser window so user can select credentials + if err := browserOpenURL(authURL); err != nil { + return interactiveAuthResult{}, err + } + // now wait until the logic calls us back + res := srv.Result(ctx) + if res.Err != nil { + return interactiveAuthResult{}, res.Err + } + return interactiveAuthResult{ + authCode: res.Code, + redirectURI: srv.Addr, + }, nil +} + +// creates a code verifier string along with its SHA256 hash which +// is used as the challenge when requesting an auth code. +// used in interactive auth flow for PKCE. 
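// Illustrative note (not part of the vendored source): per RFC 7636, the S256
// code challenge sent in the authorization request is the unpadded base64url
// encoding of the SHA-256 hash of the verifier, which is exactly what
// codeVerifier below computes. With a hypothetical verifier v:
//
//	sum := sha256.Sum256([]byte(v))
//	challenge := base64.RawURLEncoding.EncodeToString(sum[:])
//	// The verifier itself is only sent later, to the token endpoint,
//	// where the authorization server recomputes and compares the hash.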
+func codeVerifier() (codeVerifier string, challenge string, err error) { + cvBytes := make([]byte, 32) + if _, err = rand.Read(cvBytes); err != nil { + return + } + codeVerifier = base64.RawURLEncoding.EncodeToString(cvBytes) + // for PKCE, create a hash of the code verifier + cvh := sha256.Sum256([]byte(codeVerifier)) + challenge = base64.RawURLEncoding.EncodeToString(cvh[:]) + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE b/vendor/github.com/kylelemons/godebug/LICENSE similarity index 94% rename from vendor/github.com/Azure/go-autorest/autorest/to/LICENSE rename to vendor/github.com/kylelemons/godebug/LICENSE index b9d6a27ea92..d6456956733 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE +++ b/vendor/github.com/kylelemons/godebug/LICENSE @@ -176,7 +176,18 @@ END OF TERMS AND CONDITIONS - Copyright 2015 Microsoft Corporation + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/kylelemons/godebug/diff/diff.go b/vendor/github.com/kylelemons/godebug/diff/diff.go new file mode 100644 index 00000000000..200e596c625 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/diff/diff.go @@ -0,0 +1,186 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package diff implements a linewise diff algorithm. +package diff + +import ( + "bytes" + "fmt" + "strings" +) + +// Chunk represents a piece of the diff. A chunk will not have both added and +// deleted lines. Equal lines are always after any added or deleted lines. +// A Chunk may or may not have any lines in it, especially for the first or last +// chunk in a computation. +type Chunk struct { + Added []string + Deleted []string + Equal []string +} + +func (c *Chunk) empty() bool { + return len(c.Added) == 0 && len(c.Deleted) == 0 && len(c.Equal) == 0 +} + +// Diff returns a string containing a line-by-line unified diff of the linewise +// changes required to make A into B. Each line is prefixed with '+', '-', or +// ' ' to indicate if it should be added, removed, or is correct respectively. 
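// Illustrative sketch (not part of the vendored source): Diff splits its two
// arguments on newlines itself, so a caller comparing rendered strings only
// needs something like:
//
//	want := "one\ntwo\nthree"
//	got := "one\n2\nthree"
//	if d := diff.Diff(want, got); d != "" {
//		fmt.Printf("unexpected output (-want +got):\n%s\n", d)
//	}
//
// An empty result means the inputs are identical line for line.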
+func Diff(A, B string) string { + aLines := strings.Split(A, "\n") + bLines := strings.Split(B, "\n") + + chunks := DiffChunks(aLines, bLines) + + buf := new(bytes.Buffer) + for _, c := range chunks { + for _, line := range c.Added { + fmt.Fprintf(buf, "+%s\n", line) + } + for _, line := range c.Deleted { + fmt.Fprintf(buf, "-%s\n", line) + } + for _, line := range c.Equal { + fmt.Fprintf(buf, " %s\n", line) + } + } + return strings.TrimRight(buf.String(), "\n") +} + +// DiffChunks uses an O(D(N+M)) shortest-edit-script algorithm +// to compute the edits required from A to B and returns the +// edit chunks. +func DiffChunks(a, b []string) []Chunk { + // algorithm: http://www.xmailserver.org/diff2.pdf + + // We'll need these quantities a lot. + alen, blen := len(a), len(b) // M, N + + // At most, it will require len(a) deletions and len(b) additions + // to transform a into b. + maxPath := alen + blen // MAX + if maxPath == 0 { + // degenerate case: two empty lists are the same + return nil + } + + // Store the endpoint of the path for diagonals. + // We store only the a index, because the b index on any diagonal + // (which we know during the loop below) is aidx-diag. + // endpoint[maxPath] represents the 0 diagonal. + // + // Stated differently: + // endpoint[d] contains the aidx of a furthest reaching path in diagonal d + endpoint := make([]int, 2*maxPath+1) // V + + saved := make([][]int, 0, 8) // Vs + save := func() { + dup := make([]int, len(endpoint)) + copy(dup, endpoint) + saved = append(saved, dup) + } + + var editDistance int // D +dLoop: + for editDistance = 0; editDistance <= maxPath; editDistance++ { + // The 0 diag(onal) represents equality of a and b. Each diagonal to + // the left is numbered one lower, to the right is one higher, from + // -alen to +blen. Negative diagonals favor differences from a, + // positive diagonals favor differences from b. The edit distance to a + // diagonal d cannot be shorter than d itself. + // + // The iterations of this loop cover either odds or evens, but not both, + // If odd indices are inputs, even indices are outputs and vice versa. + for diag := -editDistance; diag <= editDistance; diag += 2 { // k + var aidx int // x + switch { + case diag == -editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath-editDistance+1] + 0 + case diag == editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath+editDistance-1] + 1 + case endpoint[maxPath+diag+1] > endpoint[maxPath+diag-1]: + // diagonal d+1 was farther along, so use that + aidx = endpoint[maxPath+diag+1] + 0 + default: + // diagonal d-1 was farther (or the same), so use that + aidx = endpoint[maxPath+diag-1] + 1 + } + // On diagonal d, we can compute bidx from aidx. + bidx := aidx - diag // y + // See how far we can go on this diagonal before we find a difference. + for aidx < alen && bidx < blen && a[aidx] == b[bidx] { + aidx++ + bidx++ + } + // Store the end of the current edit chain. + endpoint[maxPath+diag] = aidx + // If we've found the end of both inputs, we're done! 
+ if aidx >= alen && bidx >= blen { + save() // save the final path + break dLoop + } + } + save() // save the current path + } + if editDistance == 0 { + return nil + } + chunks := make([]Chunk, editDistance+1) + + x, y := alen, blen + for d := editDistance; d > 0; d-- { + endpoint := saved[d] + diag := x - y + insert := diag == -d || (diag != d && endpoint[maxPath+diag-1] < endpoint[maxPath+diag+1]) + + x1 := endpoint[maxPath+diag] + var x0, xM, kk int + if insert { + kk = diag + 1 + x0 = endpoint[maxPath+kk] + xM = x0 + } else { + kk = diag - 1 + x0 = endpoint[maxPath+kk] + xM = x0 + 1 + } + y0 := x0 - kk + + var c Chunk + if insert { + c.Added = b[y0:][:1] + } else { + c.Deleted = a[x0:][:1] + } + if xM < x1 { + c.Equal = a[xM:][:x1-xM] + } + + x, y = x0, y0 + chunks[d] = c + } + if x > 0 { + chunks[0].Equal = a[:x] + } + if chunks[0].empty() { + chunks = chunks[1:] + } + if len(chunks) == 0 { + return nil + } + return chunks +} diff --git a/vendor/github.com/kylelemons/godebug/pretty/.gitignore b/vendor/github.com/kylelemons/godebug/pretty/.gitignore new file mode 100644 index 00000000000..fa9a735da3c --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/.gitignore @@ -0,0 +1,5 @@ +*.test +*.bench +*.golden +*.txt +*.prof diff --git a/vendor/github.com/kylelemons/godebug/pretty/doc.go b/vendor/github.com/kylelemons/godebug/pretty/doc.go new file mode 100644 index 00000000000..03b5718a70d --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/doc.go @@ -0,0 +1,25 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pretty pretty-prints Go structures. +// +// This package uses reflection to examine a Go value and can +// print out in a nice, aligned fashion. It supports three +// modes (normal, compact, and extended) for advanced use. +// +// See the Reflect and Print examples for what the output looks like. +package pretty + +// TODO: +// - Catch cycles diff --git a/vendor/github.com/kylelemons/godebug/pretty/public.go b/vendor/github.com/kylelemons/godebug/pretty/public.go new file mode 100644 index 00000000000..fbc5d7abbf8 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/public.go @@ -0,0 +1,188 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pretty + +import ( + "bytes" + "fmt" + "io" + "net" + "reflect" + "time" + + "github.com/kylelemons/godebug/diff" +) + +// A Config represents optional configuration parameters for formatting. +// +// Some options, notably ShortList, dramatically increase the overhead +// of pretty-printing a value. +type Config struct { + // Verbosity options + Compact bool // One-line output. Overrides Diffable. + Diffable bool // Adds extra newlines for more easily diffable output. + + // Field and value options + IncludeUnexported bool // Include unexported fields in output + PrintStringers bool // Call String on a fmt.Stringer + PrintTextMarshalers bool // Call MarshalText on an encoding.TextMarshaler + SkipZeroFields bool // Skip struct fields that have a zero value. + + // Output transforms + ShortList int // Maximum character length for short lists if nonzero. + + // Type-specific overrides + // + // Formatter maps a type to a function that will provide a one-line string + // representation of the input value. Conceptually: + // Formatter[reflect.TypeOf(v)](v) = "v as a string" + // + // Note that the first argument need not explicitly match the type, it must + // merely be callable with it. + // + // When processing an input value, if its type exists as a key in Formatter: + // 1) If the value is nil, no stringification is performed. + // This allows overriding of PrintStringers and PrintTextMarshalers. + // 2) The value will be called with the input as its only argument. + // The function must return a string as its first return value. + // + // In addition to func literals, two common values for this will be: + // fmt.Sprint (function) func Sprint(...interface{}) string + // Type.String (method) func (Type) String() string + // + // Note that neither of these work if the String method is a pointer + // method and the input will be provided as a value. In that case, + // use a function that calls .String on the formal value parameter. + Formatter map[reflect.Type]interface{} + + // If TrackCycles is enabled, pretty will detect and track + // self-referential structures. If a self-referential structure (aka a + // "recursive" value) is detected, numbered placeholders will be emitted. + // + // Pointer tracking is disabled by default for performance reasons. + TrackCycles bool +} + +// Default Config objects +var ( + // DefaultFormatter is the default set of overrides for stringification. + DefaultFormatter = map[reflect.Type]interface{}{ + reflect.TypeOf(time.Time{}): fmt.Sprint, + reflect.TypeOf(net.IP{}): fmt.Sprint, + reflect.TypeOf((*error)(nil)).Elem(): fmt.Sprint, + } + + // CompareConfig is the default configuration used for Compare. + CompareConfig = &Config{ + Diffable: true, + IncludeUnexported: true, + Formatter: DefaultFormatter, + } + + // DefaultConfig is the default configuration used for all other top-level functions. + DefaultConfig = &Config{ + Formatter: DefaultFormatter, + } + + // CycleTracker is a convenience config for formatting and comparing recursive structures. 
+ CycleTracker = &Config{ + Diffable: true, + Formatter: DefaultFormatter, + TrackCycles: true, + } +) + +func (cfg *Config) fprint(buf *bytes.Buffer, vals ...interface{}) { + ref := &reflector{ + Config: cfg, + } + if cfg.TrackCycles { + ref.pointerTracker = new(pointerTracker) + } + for i, val := range vals { + if i > 0 { + buf.WriteByte('\n') + } + newFormatter(cfg, buf).write(ref.val2node(reflect.ValueOf(val))) + } +} + +// Print writes the DefaultConfig representation of the given values to standard output. +func Print(vals ...interface{}) { + DefaultConfig.Print(vals...) +} + +// Print writes the configured presentation of the given values to standard output. +func (cfg *Config) Print(vals ...interface{}) { + fmt.Println(cfg.Sprint(vals...)) +} + +// Sprint returns a string representation of the given value according to the DefaultConfig. +func Sprint(vals ...interface{}) string { + return DefaultConfig.Sprint(vals...) +} + +// Sprint returns a string representation of the given value according to cfg. +func (cfg *Config) Sprint(vals ...interface{}) string { + buf := new(bytes.Buffer) + cfg.fprint(buf, vals...) + return buf.String() +} + +// Fprint writes the representation of the given value to the writer according to the DefaultConfig. +func Fprint(w io.Writer, vals ...interface{}) (n int64, err error) { + return DefaultConfig.Fprint(w, vals...) +} + +// Fprint writes the representation of the given value to the writer according to the cfg. +func (cfg *Config) Fprint(w io.Writer, vals ...interface{}) (n int64, err error) { + buf := new(bytes.Buffer) + cfg.fprint(buf, vals...) + return buf.WriteTo(w) +} + +// Compare returns a string containing a line-by-line unified diff of the +// values in a and b, using the CompareConfig. +// +// Each line in the output is prefixed with '+', '-', or ' ' to indicate which +// side it's from. Lines from the a side are marked with '-', lines from the +// b side are marked with '+' and lines that are the same on both sides are +// marked with ' '. +// +// The comparison is based on the intentionally-untyped output of Print, and as +// such this comparison is pretty forviving. In particular, if the types of or +// types within in a and b are different but have the same representation, +// Compare will not indicate any differences between them. +func Compare(a, b interface{}) string { + return CompareConfig.Compare(a, b) +} + +// Compare returns a string containing a line-by-line unified diff of the +// values in got and want according to the cfg. +// +// Each line in the output is prefixed with '+', '-', or ' ' to indicate which +// side it's from. Lines from the a side are marked with '-', lines from the +// b side are marked with '+' and lines that are the same on both sides are +// marked with ' '. +// +// The comparison is based on the intentionally-untyped output of Print, and as +// such this comparison is pretty forviving. In particular, if the types of or +// types within in a and b are different but have the same representation, +// Compare will not indicate any differences between them. +func (cfg *Config) Compare(a, b interface{}) string { + diffCfg := *cfg + diffCfg.Diffable = true + return diff.Diff(cfg.Sprint(a), cfg.Sprint(b)) +} diff --git a/vendor/github.com/kylelemons/godebug/pretty/reflect.go b/vendor/github.com/kylelemons/godebug/pretty/reflect.go new file mode 100644 index 00000000000..5cd30b7f036 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/reflect.go @@ -0,0 +1,241 @@ +// Copyright 2013 Google Inc. 
All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pretty + +import ( + "encoding" + "fmt" + "reflect" + "sort" +) + +func isZeroVal(val reflect.Value) bool { + if !val.CanInterface() { + return false + } + z := reflect.Zero(val.Type()).Interface() + return reflect.DeepEqual(val.Interface(), z) +} + +// pointerTracker is a helper for tracking pointer chasing to detect cycles. +type pointerTracker struct { + addrs map[uintptr]int // addr[address] = seen count + + lastID int + ids map[uintptr]int // ids[address] = id +} + +// track tracks following a reference (pointer, slice, map, etc). Every call to +// track should be paired with a call to untrack. +func (p *pointerTracker) track(ptr uintptr) { + if p.addrs == nil { + p.addrs = make(map[uintptr]int) + } + p.addrs[ptr]++ +} + +// untrack registers that we have backtracked over the reference to the pointer. +func (p *pointerTracker) untrack(ptr uintptr) { + p.addrs[ptr]-- + if p.addrs[ptr] == 0 { + delete(p.addrs, ptr) + } +} + +// seen returns whether the pointer was previously seen along this path. +func (p *pointerTracker) seen(ptr uintptr) bool { + _, ok := p.addrs[ptr] + return ok +} + +// keep allocates an ID for the given address and returns it. +func (p *pointerTracker) keep(ptr uintptr) int { + if p.ids == nil { + p.ids = make(map[uintptr]int) + } + if _, ok := p.ids[ptr]; !ok { + p.lastID++ + p.ids[ptr] = p.lastID + } + return p.ids[ptr] +} + +// id returns the ID for the given address. +func (p *pointerTracker) id(ptr uintptr) (int, bool) { + if p.ids == nil { + p.ids = make(map[uintptr]int) + } + id, ok := p.ids[ptr] + return id, ok +} + +// reflector adds local state to the recursive reflection logic. +type reflector struct { + *Config + *pointerTracker +} + +// follow handles following a possiblly-recursive reference to the given value +// from the given ptr address. 
+func (r *reflector) follow(ptr uintptr, val reflect.Value) node { + if r.pointerTracker == nil { + // Tracking disabled + return r.val2node(val) + } + + // If a parent already followed this, emit a reference marker + if r.seen(ptr) { + id := r.keep(ptr) + return ref{id} + } + + // Track the pointer we're following while on this recursive branch + r.track(ptr) + defer r.untrack(ptr) + n := r.val2node(val) + + // If the recursion used this ptr, wrap it with a target marker + if id, ok := r.id(ptr); ok { + return target{id, n} + } + + // Otherwise, return the node unadulterated + return n +} + +func (r *reflector) val2node(val reflect.Value) node { + if !val.IsValid() { + return rawVal("nil") + } + + if val.CanInterface() { + v := val.Interface() + if formatter, ok := r.Formatter[val.Type()]; ok { + if formatter != nil { + res := reflect.ValueOf(formatter).Call([]reflect.Value{val}) + return rawVal(res[0].Interface().(string)) + } + } else { + if s, ok := v.(fmt.Stringer); ok && r.PrintStringers { + return stringVal(s.String()) + } + if t, ok := v.(encoding.TextMarshaler); ok && r.PrintTextMarshalers { + if raw, err := t.MarshalText(); err == nil { // if NOT an error + return stringVal(string(raw)) + } + } + } + } + + switch kind := val.Kind(); kind { + case reflect.Ptr: + if val.IsNil() { + return rawVal("nil") + } + return r.follow(val.Pointer(), val.Elem()) + case reflect.Interface: + if val.IsNil() { + return rawVal("nil") + } + return r.val2node(val.Elem()) + case reflect.String: + return stringVal(val.String()) + case reflect.Slice: + n := list{} + length := val.Len() + ptr := val.Pointer() + for i := 0; i < length; i++ { + n = append(n, r.follow(ptr, val.Index(i))) + } + return n + case reflect.Array: + n := list{} + length := val.Len() + for i := 0; i < length; i++ { + n = append(n, r.val2node(val.Index(i))) + } + return n + case reflect.Map: + // Extract the keys and sort them for stable iteration + keys := val.MapKeys() + pairs := make([]mapPair, 0, len(keys)) + for _, key := range keys { + pairs = append(pairs, mapPair{ + key: new(formatter).compactString(r.val2node(key)), // can't be cyclic + value: val.MapIndex(key), + }) + } + sort.Sort(byKey(pairs)) + + // Process the keys into the final representation + ptr, n := val.Pointer(), keyvals{} + for _, pair := range pairs { + n = append(n, keyval{ + key: pair.key, + val: r.follow(ptr, pair.value), + }) + } + return n + case reflect.Struct: + n := keyvals{} + typ := val.Type() + fields := typ.NumField() + for i := 0; i < fields; i++ { + sf := typ.Field(i) + if !r.IncludeUnexported && sf.PkgPath != "" { + continue + } + field := val.Field(i) + if r.SkipZeroFields && isZeroVal(field) { + continue + } + n = append(n, keyval{sf.Name, r.val2node(field)}) + } + return n + case reflect.Bool: + if val.Bool() { + return rawVal("true") + } + return rawVal("false") + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rawVal(fmt.Sprintf("%d", val.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rawVal(fmt.Sprintf("%d", val.Uint())) + case reflect.Uintptr: + return rawVal(fmt.Sprintf("0x%X", val.Uint())) + case reflect.Float32, reflect.Float64: + return rawVal(fmt.Sprintf("%v", val.Float())) + case reflect.Complex64, reflect.Complex128: + return rawVal(fmt.Sprintf("%v", val.Complex())) + } + + // Fall back to the default %#v if we can + if val.CanInterface() { + return rawVal(fmt.Sprintf("%#v", val.Interface())) + } + + return rawVal(val.String()) +} + +type 
mapPair struct { + key string + value reflect.Value +} + +type byKey []mapPair + +func (v byKey) Len() int { return len(v) } +func (v byKey) Swap(i, j int) { v[i], v[j] = v[j], v[i] } +func (v byKey) Less(i, j int) bool { return v[i].key < v[j].key } diff --git a/vendor/github.com/kylelemons/godebug/pretty/structure.go b/vendor/github.com/kylelemons/godebug/pretty/structure.go new file mode 100644 index 00000000000..d876f60cad2 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/structure.go @@ -0,0 +1,223 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pretty + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// a formatter stores stateful formatting information as well as being +// an io.Writer for simplicity. +type formatter struct { + *bufio.Writer + *Config + + // Self-referential structure tracking + tagNumbers map[int]int // tagNumbers[id] = <#n> +} + +// newFormatter creates a new buffered formatter. For the output to be written +// to the given writer, this must be accompanied by a call to write (or Flush). +func newFormatter(cfg *Config, w io.Writer) *formatter { + return &formatter{ + Writer: bufio.NewWriter(w), + Config: cfg, + tagNumbers: make(map[int]int), + } +} + +func (f *formatter) write(n node) { + defer f.Flush() + n.format(f, "") +} + +func (f *formatter) tagFor(id int) int { + if tag, ok := f.tagNumbers[id]; ok { + return tag + } + if f.tagNumbers == nil { + return 0 + } + tag := len(f.tagNumbers) + 1 + f.tagNumbers[id] = tag + return tag +} + +type node interface { + format(f *formatter, indent string) +} + +func (f *formatter) compactString(n node) string { + switch k := n.(type) { + case stringVal: + return string(k) + case rawVal: + return string(k) + } + + buf := new(bytes.Buffer) + f2 := newFormatter(&Config{Compact: true}, buf) + f2.tagNumbers = f.tagNumbers // reuse tagNumbers just in case + f2.write(n) + return buf.String() +} + +type stringVal string + +func (str stringVal) format(f *formatter, indent string) { + f.WriteString(strconv.Quote(string(str))) +} + +type rawVal string + +func (r rawVal) format(f *formatter, indent string) { + f.WriteString(string(r)) +} + +type keyval struct { + key string + val node +} + +type keyvals []keyval + +func (l keyvals) format(f *formatter, indent string) { + f.WriteByte('{') + + switch { + case f.Compact: + // All on one line: + for i, kv := range l { + if i > 0 { + f.WriteByte(',') + } + f.WriteString(kv.key) + f.WriteByte(':') + kv.val.format(f, indent) + } + case f.Diffable: + f.WriteByte('\n') + inner := indent + " " + // Each value gets its own line: + for _, kv := range l { + f.WriteString(inner) + f.WriteString(kv.key) + f.WriteString(": ") + kv.val.format(f, inner) + f.WriteString(",\n") + } + f.WriteString(indent) + default: + keyWidth := 0 + for _, kv := range l { + if kw := len(kv.key); kw > keyWidth { + keyWidth = kw + } + } + alignKey := indent + " " + alignValue := strings.Repeat(" ", keyWidth) 
+ inner := alignKey + alignValue + " " + // First and last line shared with bracket: + for i, kv := range l { + if i > 0 { + f.WriteString(",\n") + f.WriteString(alignKey) + } + f.WriteString(kv.key) + f.WriteString(": ") + f.WriteString(alignValue[len(kv.key):]) + kv.val.format(f, inner) + } + } + + f.WriteByte('}') +} + +type list []node + +func (l list) format(f *formatter, indent string) { + if max := f.ShortList; max > 0 { + short := f.compactString(l) + if len(short) <= max { + f.WriteString(short) + return + } + } + + f.WriteByte('[') + + switch { + case f.Compact: + // All on one line: + for i, v := range l { + if i > 0 { + f.WriteByte(',') + } + v.format(f, indent) + } + case f.Diffable: + f.WriteByte('\n') + inner := indent + " " + // Each value gets its own line: + for _, v := range l { + f.WriteString(inner) + v.format(f, inner) + f.WriteString(",\n") + } + f.WriteString(indent) + default: + inner := indent + " " + // First and last line shared with bracket: + for i, v := range l { + if i > 0 { + f.WriteString(",\n") + f.WriteString(inner) + } + v.format(f, inner) + } + } + + f.WriteByte(']') +} + +type ref struct { + id int +} + +func (r ref) format(f *formatter, indent string) { + fmt.Fprintf(f, "", f.tagFor(r.id)) +} + +type target struct { + id int + value node +} + +func (t target) format(f *formatter, indent string) { + tag := fmt.Sprintf("<#%d> ", f.tagFor(t.id)) + switch { + case f.Diffable, f.Compact: + // no indent changes + default: + indent += strings.Repeat(" ", len(tag)) + } + f.WriteString(tag) + t.value.format(f, indent) +} diff --git a/vendor/github.com/pkg/browser/LICENSE b/vendor/github.com/pkg/browser/LICENSE new file mode 100644 index 00000000000..65f78fb6291 --- /dev/null +++ b/vendor/github.com/pkg/browser/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2014, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/browser/README.md b/vendor/github.com/pkg/browser/README.md new file mode 100644 index 00000000000..72b1976e303 --- /dev/null +++ b/vendor/github.com/pkg/browser/README.md @@ -0,0 +1,55 @@ + +# browser + import "github.com/pkg/browser" + +Package browser provides helpers to open files, readers, and urls in a browser window. + +The choice of which browser is started is entirely client dependant. 
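For example, a minimal program (an illustrative sketch, not part of the upstream README) that opens a URL in the system browser:

``` go
package main

import (
	"log"

	"github.com/pkg/browser"
)

func main() {
	// Delegates to the platform helper: "open" on macOS, "xdg-open" and
	// friends on Linux/BSD, ShellExecute on Windows.
	if err := browser.OpenURL("https://example.com"); err != nil {
		log.Fatal(err)
	}
}
```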
+ + + + + +## Variables +``` go +var Stderr io.Writer = os.Stderr +``` +Stderr is the io.Writer to which executed commands write standard error. + +``` go +var Stdout io.Writer = os.Stdout +``` +Stdout is the io.Writer to which executed commands write standard output. + + +## func OpenFile +``` go +func OpenFile(path string) error +``` +OpenFile opens new browser window for the file path. + + +## func OpenReader +``` go +func OpenReader(r io.Reader) error +``` +OpenReader consumes the contents of r and presents the +results in a new browser window. + + +## func OpenURL +``` go +func OpenURL(url string) error +``` +OpenURL opens a new browser window pointing to url. + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/vendor/github.com/pkg/browser/browser.go b/vendor/github.com/pkg/browser/browser.go new file mode 100644 index 00000000000..d7969d74d80 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser.go @@ -0,0 +1,57 @@ +// Package browser provides helpers to open files, readers, and urls in a browser window. +// +// The choice of which browser is started is entirely client dependant. +package browser + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" +) + +// Stdout is the io.Writer to which executed commands write standard output. +var Stdout io.Writer = os.Stdout + +// Stderr is the io.Writer to which executed commands write standard error. +var Stderr io.Writer = os.Stderr + +// OpenFile opens new browser window for the file path. +func OpenFile(path string) error { + path, err := filepath.Abs(path) + if err != nil { + return err + } + return OpenURL("file://" + path) +} + +// OpenReader consumes the contents of r and presents the +// results in a new browser window. +func OpenReader(r io.Reader) error { + f, err := ioutil.TempFile("", "browser.*.html") + if err != nil { + return fmt.Errorf("browser: could not create temporary file: %v", err) + } + if _, err := io.Copy(f, r); err != nil { + f.Close() + return fmt.Errorf("browser: caching temporary file failed: %v", err) + } + if err := f.Close(); err != nil { + return fmt.Errorf("browser: caching temporary file failed: %v", err) + } + return OpenFile(f.Name()) +} + +// OpenURL opens a new browser window pointing to url. +func OpenURL(url string) error { + return openBrowser(url) +} + +func runCmd(prog string, args ...string) error { + cmd := exec.Command(prog, args...) 
+ cmd.Stdout = Stdout + cmd.Stderr = Stderr + return cmd.Run() +} diff --git a/vendor/github.com/pkg/browser/browser_darwin.go b/vendor/github.com/pkg/browser/browser_darwin.go new file mode 100644 index 00000000000..8507cf7c2b4 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_darwin.go @@ -0,0 +1,5 @@ +package browser + +func openBrowser(url string) error { + return runCmd("open", url) +} diff --git a/vendor/github.com/pkg/browser/browser_freebsd.go b/vendor/github.com/pkg/browser/browser_freebsd.go new file mode 100644 index 00000000000..4fc7ff0761b --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_freebsd.go @@ -0,0 +1,14 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from ports(8)") + } + return err +} diff --git a/vendor/github.com/pkg/browser/browser_linux.go b/vendor/github.com/pkg/browser/browser_linux.go new file mode 100644 index 00000000000..d26cdddf9c1 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_linux.go @@ -0,0 +1,21 @@ +package browser + +import ( + "os/exec" + "strings" +) + +func openBrowser(url string) error { + providers := []string{"xdg-open", "x-www-browser", "www-browser"} + + // There are multiple possible providers to open a browser on linux + // One of them is xdg-open, another is x-www-browser, then there's www-browser, etc. + // Look for one that exists and run it + for _, provider := range providers { + if _, err := exec.LookPath(provider); err == nil { + return runCmd(provider, url) + } + } + + return &exec.Error{Name: strings.Join(providers, ","), Err: exec.ErrNotFound} +} diff --git a/vendor/github.com/pkg/browser/browser_netbsd.go b/vendor/github.com/pkg/browser/browser_netbsd.go new file mode 100644 index 00000000000..65a5e5a2934 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_netbsd.go @@ -0,0 +1,14 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from pkgsrc(7)") + } + return err +} diff --git a/vendor/github.com/pkg/browser/browser_openbsd.go b/vendor/github.com/pkg/browser/browser_openbsd.go new file mode 100644 index 00000000000..4fc7ff0761b --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_openbsd.go @@ -0,0 +1,14 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from ports(8)") + } + return err +} diff --git a/vendor/github.com/pkg/browser/browser_unsupported.go b/vendor/github.com/pkg/browser/browser_unsupported.go new file mode 100644 index 00000000000..7c5c17d34d2 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!windows,!darwin,!openbsd,!freebsd,!netbsd + +package browser + +import ( + "fmt" + "runtime" +) + +func openBrowser(url string) error { + return fmt.Errorf("openBrowser: unsupported operating system: %v", runtime.GOOS) +} diff --git a/vendor/github.com/pkg/browser/browser_windows.go b/vendor/github.com/pkg/browser/browser_windows.go new file mode 100644 index 00000000000..63e192959a5 --- 
/dev/null +++ b/vendor/github.com/pkg/browser/browser_windows.go @@ -0,0 +1,7 @@ +package browser + +import "golang.org/x/sys/windows" + +func openBrowser(url string) error { + return windows.ShellExecute(0, nil, windows.StringToUTF16Ptr(url), nil, nil, windows.SW_SHOWNORMAL) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/LICENSE similarity index 94% rename from vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE rename to vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/LICENSE index b9d6a27ea92..d6456956733 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/LICENSE @@ -176,7 +176,18 @@ END OF TERMS AND CONDITIONS - Copyright 2015 Microsoft Corporation + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/LICENSE b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
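
The client.go diff that follows swaps the deprecated go-autorest Key Vault client for the azcore/azidentity/azkeys SDK. As a rough, hypothetical sketch of the new construction path (placeholder vault URL and key name; the credential fallback only mirrors the environment-then-CLI order used by getAzureCredential below), the new API can be exercised roughly like this:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys"
)

func main() {
	// Placeholder values; substitute a real vault URL and key name.
	vaultURL := "https://example-vault.vault.azure.net/"
	keyName := "example-key"

	// Fall back from environment-variable auth to the Azure CLI,
	// mirroring the order used by getAzureCredential in the vendored code.
	var cred azcore.TokenCredential
	envCred, err := azidentity.NewEnvironmentCredential(nil)
	if err == nil {
		cred = envCred
	} else {
		cliCred, cliErr := azidentity.NewAzureCLICredential(nil)
		if cliErr != nil {
			log.Fatalf("no Azure credential available: %v", cliErr)
		}
		cred = cliCred
	}

	// azkeys.NewClient replaces the old keyvault.New() + Authorizer setup.
	client, err := azkeys.NewClient(vaultURL, cred, nil)
	if err != nil {
		log.Fatalf("new azkeys client: %v", err)
	}

	// An empty version string resolves to the latest version of the key.
	resp, err := client.GetKey(context.Background(), keyName, "", nil)
	if err != nil {
		log.Fatalf("get key: %v", err)
	}
	fmt.Printf("key type: %v\n", *resp.Key.Kty)
}

Note that the vault URL is now passed once at client construction rather than on every CreateKey/GetKey/Sign/Verify call, which is why the kvClient interface in the hunk below drops the vaultBaseURL parameter.
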
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go index 1bbb61f4786..1ffb3445e90 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go @@ -29,13 +29,15 @@ import ( "strings" "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/go-jose/go-jose/v3" "github.com/jellydator/ttlcache/v3" - kvauth "github.com/Azure/azure-sdk-for-go/services/keyvault/auth" - "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys" "github.com/sigstore/sigstore/pkg/signature" sigkms "github.com/sigstore/sigstore/pkg/signature/kms" ) @@ -47,10 +49,10 @@ func init() { } type kvClient interface { - CreateKey(ctx context.Context, vaultBaseURL, keyName string, parameters keyvault.KeyCreateParameters) (result keyvault.KeyBundle, err error) - GetKey(ctx context.Context, vaultBaseURL, keyName, keyVersion string) (result keyvault.KeyBundle, err error) - Sign(ctx context.Context, vaultBaseURL, keyName, keyVersion string, parameters keyvault.KeySignParameters) (result keyvault.KeyOperationResult, err error) - Verify(ctx context.Context, vaultBaseURL, keyName, keyVersion string, parameters keyvault.KeyVerifyParameters) (result keyvault.KeyVerifyResult, err error) + CreateKey(ctx context.Context, name string, parameters azkeys.CreateKeyParameters, options *azkeys.CreateKeyOptions) (azkeys.CreateKeyResponse, error) + GetKey(ctx context.Context, name, version string, options *azkeys.GetKeyOptions) (azkeys.GetKeyResponse, error) + Sign(ctx context.Context, name, version string, parameters azkeys.SignParameters, options *azkeys.SignOptions) (azkeys.SignResponse, error) + Verify(ctx context.Context, name, version string, parameters azkeys.VerifyParameters, options *azkeys.VerifyOptions) (azkeys.VerifyResponse, error) } type azureVaultClient struct { @@ -93,7 +95,7 @@ func parseReference(resourceID string) (vaultURL, vaultName, keyName string, err return } -func newAzureKMS(_ context.Context, keyResourceID string) (*azureVaultClient, error) { +func newAzureKMS(keyResourceID string) (*azureVaultClient, error) { if err := ValidReference(keyResourceID); err != nil { return nil, err } @@ -102,13 +104,13 @@ func newAzureKMS(_ context.Context, keyResourceID string) (*azureVaultClient, er return nil, err } - client, err := getKeysClient() + client, err := getKeysClient(vaultURL) if err != nil { return nil, fmt.Errorf("new azure kms client: %w", err) } azClient := &azureVaultClient{ - client: &client, + client: client, vaultURL: vaultURL, vaultName: vaultName, keyName: keyName, @@ -154,7 +156,11 @@ func getAuthenticationMethod() authenticationMethod { return unknownAuthenticationMethod } -// getAuthorizer takes an authenticationMethod and returns an Authorizer or an error. +type azureCredential interface { + GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) +} + +// getAzureCredential takes an authenticationMethod and returns an Azure credential or an error. // If the method is unknown, Environment will be tested and if it returns an error CLI will be tested. 
// If the method is specified, the specified method will be used and no other will be tested. // This means the following default order of methods will be used if nothing else is defined: @@ -163,42 +169,51 @@ func getAuthenticationMethod() authenticationMethod { // 3. Username password (FromEnvironment) // 4. MSI (FromEnvironment) // 5. CLI (FromCLI) -func getAuthorizer(method authenticationMethod) (autorest.Authorizer, error) { +func getAzureCredential(method authenticationMethod) (azureCredential, error) { switch method { case environmentAuthenticationMethod: - return kvauth.NewAuthorizerFromEnvironment() + cred, err := azidentity.NewEnvironmentCredential(nil) + if err != nil { + return nil, fmt.Errorf("failed to create default azure credential from env auth method: %w", err) + } + return cred, nil case cliAuthenticationMethod: - return kvauth.NewAuthorizerFromCLI() + cred, err := azidentity.NewAzureCLICredential(nil) + if err != nil { + return nil, fmt.Errorf("failed to create default Azure credential from env auth method: %w", err) + } + return cred, nil case unknownAuthenticationMethod: break default: return nil, fmt.Errorf("you should never reach this") } - authorizer, err := kvauth.NewAuthorizerFromEnvironment() + cred, err := azidentity.NewEnvironmentCredential(nil) if err == nil { - return authorizer, nil + return cred, nil } - return kvauth.NewAuthorizerFromCLI() + cred2, err := azidentity.NewAzureCLICredential(nil) + if err != nil { + return nil, fmt.Errorf("failed to create default Azure credential from env auth method: %w", err) + } + return cred2, nil } -func getKeysClient() (keyvault.BaseClient, error) { - keyClient := keyvault.New() - +func getKeysClient(vaultURL string) (*azkeys.Client, error) { authMethod := getAuthenticationMethod() - authorizer, err := getAuthorizer(authMethod) + cred, err := getAzureCredential(authMethod) if err != nil { - return keyvault.BaseClient{}, err + return nil, err } - keyClient.Authorizer = authorizer - err = keyClient.AddToUserAgent("sigstore") + client, err := azkeys.NewClient(vaultURL, cred, nil) if err != nil { - return keyvault.BaseClient{}, err + return nil, err } - return keyClient, nil + return client, nil } func (a *azureVaultClient) fetchPublicKey(ctx context.Context) (crypto.PublicKey, error) { @@ -208,19 +223,18 @@ func (a *azureVaultClient) fetchPublicKey(ctx context.Context) (crypto.PublicKey } key := keyBundle.Key - keyType := string(key.Kty) + keyType := key.Kty // Azure Key Vault allows keys to be stored in either default Key Vault storage // or in managed HSMs. If the key is stored in a HSM, the key type is suffixed // with "-HSM". Since this suffix is specific to Azure Key Vault, it needs // be stripped from the key type before attempting to represent the key // with a go-jose/JSONWebKey struct. 
- if strings.HasSuffix(keyType, "-HSM") { - split := strings.Split(keyType, "-HSM") - // since we split on the suffix, there should be only two elements - // the first element should contain the key type without the -HSM suffix - newKeyType := split[0] - key.Kty = keyvault.JSONWebKeyType(newKeyType) + switch *keyType { + case azkeys.JSONWebKeyTypeECHSM: + *key.Kty = azkeys.JSONWebKeyTypeEC + case azkeys.JSONWebKeyTypeRSAHSM: + *key.Kty = azkeys.JSONWebKeyTypeRSA } jwkJSON, err := json.Marshal(*key) @@ -244,13 +258,13 @@ func (a *azureVaultClient) fetchPublicKey(ctx context.Context) (crypto.PublicKey return pub, nil } -func (a *azureVaultClient) getKey(ctx context.Context) (keyvault.KeyBundle, error) { - key, err := a.client.GetKey(ctx, a.vaultURL, a.keyName, "") +func (a *azureVaultClient) getKey(ctx context.Context) (azkeys.KeyBundle, error) { + resp, err := a.client.GetKey(ctx, a.vaultURL, a.keyName, nil) if err != nil { - return keyvault.KeyBundle{}, fmt.Errorf("public key: %w", err) + return azkeys.KeyBundle{}, fmt.Errorf("public key: %w", err) } - return key, err + return resp.KeyBundle, err } func (a *azureVaultClient) public(ctx context.Context) (crypto.PublicKey, error) { @@ -281,22 +295,21 @@ func (a *azureVaultClient) createKey(ctx context.Context) (crypto.PublicKey, err _, err = a.client.CreateKey( ctx, - a.vaultURL, a.keyName, - keyvault.KeyCreateParameters{ - KeyAttributes: &keyvault.KeyAttributes{ - Enabled: to.BoolPtr(true), + azkeys.CreateKeyParameters{ + KeyAttributes: &azkeys.KeyAttributes{ + Enabled: to.Ptr(true), }, - KeySize: to.Int32Ptr(2048), - KeyOps: &[]keyvault.JSONWebKeyOperation{ - keyvault.Sign, - keyvault.Verify, + KeySize: to.Ptr(int32(2048)), + KeyOps: []*azkeys.JSONWebKeyOperation{ + to.Ptr(azkeys.JSONWebKeyOperationSign), + to.Ptr(azkeys.JSONWebKeyOperationVerify), }, - Kty: keyvault.EC, + Kty: to.Ptr(azkeys.JSONWebKeyTypeEC), Tags: map[string]*string{ - "use": to.StringPtr("sigstore"), + "use": to.Ptr("sigstore"), }, - }) + }, nil) if err != nil { return nil, err } @@ -304,14 +317,14 @@ func (a *azureVaultClient) createKey(ctx context.Context) (crypto.PublicKey, err return a.public(ctx) } -func getKeyVaultSignatureAlgo(algo crypto.Hash) (keyvault.JSONWebKeySignatureAlgorithm, error) { +func getKeyVaultSignatureAlgo(algo crypto.Hash) (azkeys.JSONWebKeySignatureAlgorithm, error) { switch algo { case crypto.SHA256: - return keyvault.ES256, nil + return azkeys.JSONWebKeySignatureAlgorithmES256, nil case crypto.SHA384: - return keyvault.ES384, nil + return azkeys.JSONWebKeySignatureAlgorithmES384, nil case crypto.SHA512: - return keyvault.ES512, nil + return azkeys.JSONWebKeySignatureAlgorithmES512, nil default: return "", fmt.Errorf("unsupported algorithm: %s", algo) } @@ -323,22 +336,29 @@ func (a *azureVaultClient) sign(ctx context.Context, hash []byte, algo crypto.Ha return nil, fmt.Errorf("failed to get KeyVaultSignatureAlgorithm: %w", err) } - params := keyvault.KeySignParameters{ - Algorithm: keyVaultAlgo, - Value: to.StringPtr(base64.RawURLEncoding.EncodeToString(hash)), + encodedHash := make([]byte, base64.RawURLEncoding.EncodedLen(len(hash))) + base64.StdEncoding.Encode(encodedHash, hash) + + params := azkeys.SignParameters{ + Algorithm: &keyVaultAlgo, + Value: encodedHash, } - result, err := a.client.Sign(ctx, a.vaultURL, a.keyName, "", params) + result, err := a.client.Sign(ctx, a.vaultURL, a.keyName, params, nil) if err != nil { return nil, fmt.Errorf("signing the payload: %w", err) } - decResult, err := 
base64.RawURLEncoding.DecodeString(*result.Result) + decodedRes := make([]byte, base64.RawURLEncoding.DecodedLen(len(result.Result))) + + n, err := base64.StdEncoding.Decode(decodedRes, result.Result) if err != nil { return nil, fmt.Errorf("decoding the result: %w", err) } - return decResult, nil + decodedRes = decodedRes[:n] + + return decodedRes, nil } func (a *azureVaultClient) verify(ctx context.Context, signature, hash []byte, algo crypto.Hash) error { @@ -347,13 +367,19 @@ func (a *azureVaultClient) verify(ctx context.Context, signature, hash []byte, a return fmt.Errorf("failed to get KeyVaultSignatureAlgorithm: %w", err) } - params := keyvault.KeyVerifyParameters{ - Algorithm: keyVaultAlgo, - Digest: to.StringPtr(base64.RawURLEncoding.EncodeToString(hash)), - Signature: to.StringPtr(base64.RawURLEncoding.EncodeToString(signature)), + encodedHash := make([]byte, base64.RawURLEncoding.EncodedLen(len(hash))) + base64.StdEncoding.Encode(encodedHash, hash) + + encodedSignature := make([]byte, base64.RawURLEncoding.EncodedLen(len(signature))) + base64.StdEncoding.Encode(encodedSignature, signature) + + params := azkeys.VerifyParameters{ + Algorithm: &keyVaultAlgo, + Digest: encodedHash, + Signature: encodedSignature, } - result, err := a.client.Verify(ctx, a.vaultURL, a.keyName, "", params) + result, err := a.client.Verify(ctx, a.vaultURL, a.keyName, params, nil) if err != nil { return fmt.Errorf("verify: %w", err) } diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/signer.go index ebf38893117..154f32a8b75 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/signer.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/signer.go @@ -65,13 +65,13 @@ func LoadSignerVerifier(defaultCtx context.Context, referenceStr string, hashFun } var err error - a.client, err = newAzureKMS(defaultCtx, referenceStr) + a.client, err = newAzureKMS(referenceStr) if err != nil { return nil, err } switch hashFunc { - case 0, crypto.SHA224, crypto.SHA256, crypto.SHA384, crypto.SHA512: + case 0, crypto.SHA256, crypto.SHA384, crypto.SHA512: a.hashFunc = hashFunc default: return nil, errors.New("hash function not supported by Azure Key Vault") diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/LICENSE b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/LICENSE b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go index 3141a7f1b98..6fc2838a3fb 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -431,6 +431,14 @@ func (s *String) readBase128Int(out *int) bool { } ret <<= 7 b := s.read(1)[0] + + // ITU-T X.690, section 8.19.2: + // The subidentifier shall be encoded in the fewest possible octets, + // that is, the leading octet of the subidentifier shall not have the value 0x80. + if i == 0 && b == 0x80 { + return false + } + ret |= int(b & 0x7f) if b&0x80 == 0 { *out = ret diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f965579f7d5..ac90a2631c9 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1266,6 +1266,27 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return res, nil } + cancelRequest := func(cs *clientStream, err error) error { + cs.cc.mu.Lock() + defer cs.cc.mu.Unlock() + cs.abortStreamLocked(err) + if cs.ID != 0 { + // This request may have failed because of a problem with the connection, + // or for some unrelated reason. (For example, the user might have canceled + // the request without waiting for a response.) Mark the connection as + // not reusable, since trying to reuse a dead connection is worse than + // unnecessarily creating a new one. + // + // If cs.ID is 0, then the request was never allocated a stream ID and + // whatever went wrong was unrelated to the connection. We might have + // timed out waiting for a stream slot when StrictMaxConcurrentStreams + // is set, for example, in which case retrying on a different connection + // will not help. 
+ cs.cc.doNotReuse = true + } + return err + } + for { select { case <-cs.respHeaderRecv: @@ -1280,15 +1301,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return handleResponseHeaders() default: waitDone() - return nil, cs.abortErr + return nil, cancelRequest(cs, cs.abortErr) } case <-ctx.Done(): - err := ctx.Err() - cs.abortStream(err) - return nil, err + return nil, cancelRequest(cs, ctx.Err()) case <-cs.reqCancel: - cs.abortStream(errRequestCanceled) - return nil, errRequestCanceled + return nil, cancelRequest(cs, errRequestCanceled) } } } diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go index 97db2340ec9..84fcc32b634 100644 --- a/vendor/golang.org/x/net/internal/socks/socks.go +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -289,7 +289,7 @@ func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, case AuthMethodNotRequired: return nil case AuthMethodUsernamePassword: - if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 { + if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) > 255 { return errors.New("invalid username/password") } b := []byte{authUsernamePasswordVersion} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index b3e8783cc59..2cf71f0f93f 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -8,7 +8,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" @@ -142,10 +141,8 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar // Second, try a well-known file. filename := wellKnownFile() - if creds, err := readCredentialsFile(ctx, filename, params); err == nil { - return creds, nil - } else if !os.IsNotExist(err) { - return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + if b, err := os.ReadFile(filename); err == nil { + return CredentialsFromJSONWithParams(ctx, b, params) } // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) @@ -231,7 +228,7 @@ func wellKnownFile() string { } func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { - b, err := ioutil.ReadFile(filename) + b, err := os.ReadFile(filename) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index c0ab196cf46..14989beaf49 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -14,7 +14,7 @@ import ( // ParseKey converts the binary contents of a private key file // to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key +// PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. 
func ParseKey(key []byte) (*rsa.PrivateKey, error) { diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index b4723fcacea..58901bda53e 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -55,12 +55,18 @@ type Token struct { } // tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. +// providers returning a token or error in JSON form. +// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1 type tokenJSON struct { AccessToken string `json:"access_token"` TokenType string `json:"token_type"` RefreshToken string `json:"refresh_token"` ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + // error fields + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` } func (e *tokenJSON) expiry() (t time.Time) { @@ -236,21 +242,29 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, &RetrieveError{ - Response: r, - Body: body, - } + + failureStatus := r.StatusCode < 200 || r.StatusCode > 299 + retrieveError := &RetrieveError{ + Response: r, + Body: body, + // attempt to populate error detail below } var token *Token content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) switch content { case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string vals, err := url.ParseQuery(string(body)) if err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse response: %v", err) } + retrieveError.ErrorCode = vals.Get("error") + retrieveError.ErrorDescription = vals.Get("error_description") + retrieveError.ErrorURI = vals.Get("error_uri") token = &Token{ AccessToken: vals.Get("access_token"), TokenType: vals.Get("token_type"), @@ -265,8 +279,14 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { default: var tj tokenJSON if err = json.Unmarshal(body, &tj); err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse json: %v", err) } + retrieveError.ErrorCode = tj.ErrorCode + retrieveError.ErrorDescription = tj.ErrorDescription + retrieveError.ErrorURI = tj.ErrorURI token = &Token{ AccessToken: tj.AccessToken, TokenType: tj.TokenType, @@ -276,17 +296,37 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } + // according to spec, servers should respond status 400 in error case + // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 + // but some unorthodox servers respond 200 in error case + if failureStatus || retrieveError.ErrorCode != "" { + return nil, retrieveError + } if token.AccessToken == "" { return nil, errors.New("oauth2: server response missing access_token") } return token, nil } +// mirrors oauth2.RetrieveError type RetrieveError struct { - Response *http.Response - Body []byte + Response *http.Response + Body []byte + ErrorCode string + ErrorDescription string + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { 
+ s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 7c64006de69..5ffce9764be 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -175,14 +175,31 @@ func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) } // RetrieveError is the error returned when the token endpoint returns a -// non-2XX HTTP status code. +// non-2XX HTTP status code or populates RFC 6749's 'error' parameter. +// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 type RetrieveError struct { Response *http.Response // Body is the body that was consumed by reading Response.Body. // It may be truncated. Body []byte + // ErrorCode is RFC 6749's 'error' parameter. + ErrorCode string + // ErrorDescription is RFC 6749's 'error_description' parameter. + ErrorDescription string + // ErrorURI is RFC 6749's 'error_uri' parameter. + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { + s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/vendor/modules.txt b/vendor/modules.txt index c1773e55bf5..25304c3e564 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -24,10 +24,44 @@ contrib.go.opencensus.io/exporter/ocagent contrib.go.opencensus.io/exporter/prometheus # github.com/Azure/azure-sdk-for-go v68.0.0+incompatible ## explicit -github.com/Azure/azure-sdk-for-go/services/keyvault/auth -github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry github.com/Azure/azure-sdk-for-go/version +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/azcore +github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared +github.com/Azure/azure-sdk-for-go/sdk/azcore/log +github.com/Azure/azure-sdk-for-go/sdk/azcore/policy +github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime +github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming +github.com/Azure/azure-sdk-for-go/sdk/azcore/to +github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/azidentity +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/internal/diag +github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo 
+github.com/Azure/azure-sdk-for-go/sdk/internal/exported +github.com/Azure/azure-sdk-for-go/sdk/internal/log +github.com/Azure/azure-sdk-for-go/sdk/internal/poller +github.com/Azure/azure-sdk-for-go/sdk/internal/temporal +github.com/Azure/azure-sdk-for-go/sdk/internal/uuid +# github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys +# github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal # github.com/Azure/go-autorest v14.2.0+incompatible ## explicit github.com/Azure/go-autorest @@ -47,18 +81,35 @@ github.com/Azure/go-autorest/autorest/azure/cli # github.com/Azure/go-autorest/autorest/date v0.3.0 ## explicit; go 1.12 github.com/Azure/go-autorest/autorest/date -# github.com/Azure/go-autorest/autorest/to v0.4.0 -## explicit; go 1.12 -github.com/Azure/go-autorest/autorest/to -# github.com/Azure/go-autorest/autorest/validation v0.3.1 -## explicit; go 1.12 -github.com/Azure/go-autorest/autorest/validation # github.com/Azure/go-autorest/logger v0.2.1 ## explicit; go 1.12 github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 ## explicit; go 1.12 github.com/Azure/go-autorest/tracing +# github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 +## explicit; go 1.18 +github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache +github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential +github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version +github.com/AzureAD/microsoft-authentication-library-for-go/apps/public # github.com/Microsoft/go-winio v0.6.1 ## explicit; go 1.17 github.com/Microsoft/go-winio @@ -615,6 +666,10 @@ github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash +# github.com/kylelemons/godebug v1.1.0 +## explicit; go 1.11 
+github.com/kylelemons/godebug/diff +github.com/kylelemons/godebug/pretty # github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf ## explicit; go 1.18 github.com/letsencrypt/boulder/core @@ -631,6 +686,10 @@ github.com/letsencrypt/boulder/sa/proto github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter +# github.com/mattn/go-colorable v0.1.9 +## explicit; go 1.13 +# github.com/mattn/go-isatty v0.0.14 +## explicit; go 1.12 # github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil @@ -674,6 +733,9 @@ github.com/openzipkin/zipkin-go/model github.com/pjbgf/sha1cd github.com/pjbgf/sha1cd/internal github.com/pjbgf/sha1cd/ubc +# github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 +## explicit; go 1.14 +github.com/pkg/browser # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -719,18 +781,26 @@ github.com/shurcooL/githubv4 github.com/shurcooL/graphql github.com/shurcooL/graphql/ident github.com/shurcooL/graphql/internal/jsonutil -# github.com/sigstore/sigstore v1.6.4 +# github.com/sigstore/sigstore v1.6.5 ## explicit; go 1.18 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/signature github.com/sigstore/sigstore/pkg/signature/kms +github.com/sigstore/sigstore/pkg/signature/kms/fake +github.com/sigstore/sigstore/pkg/signature/options +github.com/sigstore/sigstore/pkg/signature/payload +# github.com/sigstore/sigstore/pkg/signature/kms/aws v1.6.5 +## explicit; go 1.18 github.com/sigstore/sigstore/pkg/signature/kms/aws +# github.com/sigstore/sigstore/pkg/signature/kms/azure v1.6.5 +## explicit; go 1.18 github.com/sigstore/sigstore/pkg/signature/kms/azure -github.com/sigstore/sigstore/pkg/signature/kms/fake +# github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.6.5 +## explicit; go 1.18 github.com/sigstore/sigstore/pkg/signature/kms/gcp +# github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.6.5 +## explicit; go 1.18 github.com/sigstore/sigstore/pkg/signature/kms/hashivault -github.com/sigstore/sigstore/pkg/signature/options -github.com/sigstore/sigstore/pkg/signature/payload # github.com/sirupsen/logrus v1.9.0 ## explicit; go 1.13 github.com/sirupsen/logrus @@ -863,7 +933,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/ztest go.uber.org/zap/zapcore go.uber.org/zap/zaptest -# golang.org/x/crypto v0.8.0 +# golang.org/x/crypto v0.9.0 ## explicit; go 1.17 golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 @@ -897,7 +967,7 @@ golang.org/x/exp/maps golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.9.0 +# golang.org/x/net v0.10.0 ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -910,7 +980,7 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.7.0 +# golang.org/x/oauth2 v0.8.0 ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler