diff --git a/go.mod b/go.mod index f74167351e3..3180edc5c76 100644 --- a/go.mod +++ b/go.mod @@ -31,6 +31,7 @@ require ( github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20200504092944-27473ea1ae43 github.com/openshift/cluster-autoscaler-operator v0.0.0-20190521201101-62768a6ba480 github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480 + github.com/openshift/hive/pkg/apis v0.0.0 github.com/openshift/installer v0.9.0-master.0.20210201172249-df32ad26dd6f github.com/openshift/library-go v0.0.0-20201109112824-093ad3cf6600 github.com/openshift/machine-api-operator v0.2.1-0.20201111151924-77300d0c997a @@ -102,3 +103,6 @@ replace ( // needed because otherwise v12.0.0 is picked up as a more recent version replace k8s.io/client-go => k8s.io/client-go v0.19.5 + +// submodule +replace github.com/openshift/hive/pkg/apis => ./pkg/apis diff --git a/pkg/apis/go.mod b/pkg/apis/go.mod new file mode 100644 index 00000000000..016298ed17e --- /dev/null +++ b/pkg/apis/go.mod @@ -0,0 +1,8 @@ +module github.com/openshift/hive/pkg/apis + +go 1.15 + +require ( + k8s.io/api v0.19.5 + k8s.io/apimachinery v0.19.5 +) diff --git a/pkg/apis/go.sum b/pkg/apis/go.sum new file mode 100644 index 00000000000..1c27cb81937 --- /dev/null +++ b/pkg/apis/go.sum @@ -0,0 +1,185 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0 
h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/json-iterator/go v1.1.6/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openshift/api v3.9.0+incompatible h1:fJ/KsefYuZAjmrr3+5U9yZIZbTOpVkDDLDLFresAeYs= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.20.2 h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw= +k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= +k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg= +k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +sigs.k8s.io/controller-runtime v0.8.1 h1:O0K2CJ2JavK8/Tf4LfcpAwRxOFBhv8DjyrbmE6Qw59s= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/openshift/hive/pkg/apis/addtoscheme_hive_v1.go b/vendor/github.com/openshift/hive/pkg/apis/addtoscheme_hive_v1.go new file mode 100644 index 00000000000..412def80417 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/addtoscheme_hive_v1.go @@ -0,0 +1,10 @@ +package apis + +import ( + hivev1 "github.com/openshift/hive/pkg/apis/hive/v1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, hivev1.SchemeBuilder.AddToScheme) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/addtoscheme_hiveinternal_v1alpha1.go b/vendor/github.com/openshift/hive/pkg/apis/addtoscheme_hiveinternal_v1alpha1.go new file mode 100644 index 00000000000..6fa09d3947b --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/addtoscheme_hiveinternal_v1alpha1.go @@ -0,0 +1,10 @@ +package apis + +import ( + hiveintv1alpha1 "github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, hiveintv1alpha1.SchemeBuilder.AddToScheme) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/apis.go b/vendor/github.com/openshift/hive/pkg/apis/apis.go new file mode 100644 index 00000000000..ef8102d1a52 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/apis.go @@ -0,0 +1,14 @@ +// Package apis contains Kubernetes API groups. 
+package apis + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// AddToSchemes may be used to add all resources defined in the project to a Scheme +var AddToSchemes runtime.SchemeBuilder + +// AddToScheme adds all Resources to the Scheme +func AddToScheme(s *runtime.Scheme) error { + return AddToSchemes.AddToScheme(s) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/go.mod b/vendor/github.com/openshift/hive/pkg/apis/go.mod new file mode 100644 index 00000000000..016298ed17e --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/go.mod @@ -0,0 +1,8 @@ +module github.com/openshift/hive/pkg/apis + +go 1.15 + +require ( + k8s.io/api v0.19.5 + k8s.io/apimachinery v0.19.5 +) diff --git a/vendor/github.com/openshift/hive/pkg/apis/go.sum b/vendor/github.com/openshift/hive/pkg/apis/go.sum new file mode 100644 index 00000000000..1c27cb81937 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/go.sum @@ -0,0 +1,185 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= 
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= 
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openshift/api v3.9.0+incompatible h1:fJ/KsefYuZAjmrr3+5U9yZIZbTOpVkDDLDLFresAeYs= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 
v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.20.2 h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw= +k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= +k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg= +k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +sigs.k8s.io/controller-runtime v0.8.1 h1:O0K2CJ2JavK8/Tf4LfcpAwRxOFBhv8DjyrbmE6Qw59s= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/openshift/hive/pkg/apis/helpers/namer.go b/vendor/github.com/openshift/hive/pkg/apis/helpers/namer.go new file mode 100644 index 00000000000..2c294f8eb9b --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/helpers/namer.go @@ -0,0 +1,70 @@ +package helpers + +import ( + "fmt" + "hash/fnv" + + "k8s.io/apimachinery/pkg/util/validation" +) + +// GetName returns a name given a base ("deployment-5") and a suffix ("deploy") +// It will first attempt to join them with a dash. If the resulting name is longer +// than maxLength: if the suffix is too long, it will truncate the base name and add +// an 8-character hash of the [base]-[suffix] string. If the suffix is not too long, +// it will truncate the base, add the hash of the base and return [base]-[hash]-[suffix] +func GetName(base, suffix string, maxLength int) string { + if maxLength <= 0 { + return "" + } + name := fmt.Sprintf("%s-%s", base, suffix) + if len(name) <= maxLength { + return name + } + + baseLength := maxLength - 10 /*length of -hash-*/ - len(suffix) + + // if the suffix is too long, ignore it + if baseLength < 0 { + prefix := base[0:min(len(base), max(0, maxLength-9))] + // Calculate hash on initial base-suffix string + shortName := fmt.Sprintf("%s-%s", prefix, hash(name)) + return shortName[:min(maxLength, len(shortName))] + } + + prefix := base[0:baseLength] + // Calculate hash on initial base-suffix string + return fmt.Sprintf("%s-%s-%s", prefix, hash(base), suffix) +} + +// GetResourceName returns a generated name with the default max length +// for most kubernetes resources. This should only be used for resources that +// have default name validation. 
+func GetResourceName(base, suffix string) string {
+ return GetName(base, suffix, validation.DNS1123LabelMaxLength)
+}
+
+// max returns the greater of its 2 inputs
+func max(a, b int) int {
+ if b > a {
+ return b
+ }
+ return a
+}
+
+// min returns the lesser of its 2 inputs
+func min(a, b int) int {
+ if b < a {
+ return b
+ }
+ return a
+}
+
+// hash calculates the hexadecimal representation (8-chars)
+// of the hash of the passed in string using the FNV-1a algorithm
+func hash(s string) string {
+ hash := fnv.New32a()
+ hash.Write([]byte(s))
+ intHash := hash.Sum32()
+ result := fmt.Sprintf("%08x", intHash)
+ return result
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/doc.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/doc.go new file mode 100644 index 00000000000..82ff28ca823 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/doc.go @@ -0,0 +1,4 @@ +// Package agent contains API Schema definitions for assisted agent based installations.
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/hive/pkg/apis/hive
+package agent
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/installstrategy.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/installstrategy.go new file mode 100644 index 00000000000..ec1b0661c46 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/installstrategy.go @@ -0,0 +1,76 @@ +package agent
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// InstallStrategy is the install strategy configuration for provisioning a cluster with the
+// Agent based assisted installer.
+type InstallStrategy struct {
+ // Networking is the configuration for the pod network provider in
+ // the cluster.
+ Networking Networking `json:"networking"`
+
+ // SSHPublicKey will be added to all cluster hosts for use in debugging.
+ // +optional
+ SSHPublicKey string `json:"sshPublicKey,omitempty"`
+
+ // AgentSelector is a label selector used for associating relevant custom resources with this cluster.
+ // (Agent, BareMetalHost, etc)
+ AgentSelector metav1.LabelSelector `json:"agentSelector"`
+
+ // ProvisionRequirements defines configuration for when the installation is ready to be launched automatically.
+ ProvisionRequirements ProvisionRequirements `json:"provisionRequirements"`
+}
+
+// ProvisionRequirements defines configuration for when the installation is ready to be launched automatically.
+type ProvisionRequirements struct {
+
+ // ControlPlaneAgents is the number of matching approved and ready Agents with the control plane role
+ // required to launch the install. Must be either 1 or 3.
+ ControlPlaneAgents int `json:"controlPlaneAgents"`
+
+ // WorkerAgents is the minimum number of matching approved and ready Agents with the worker role
+ // required to launch the install.
+ // +optional
+ WorkerAgents int `json:"workerAgents,omitempty"`
+}
+
+// Networking defines the pod network provider in the cluster.
+type Networking struct {
+ // MachineNetwork is the list of IP address pools for machines.
+ MachineNetwork []MachineNetworkEntry `json:"machineNetwork"`
+
+ // ClusterNetwork is the list of IP address pools for pods.
+ // Default is 10.128.0.0/14 and a host prefix of /23.
+ //
+ // +optional
+ ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"`
+
+ // ServiceNetwork is the list of IP address pools for services.
+ // Default is 172.30.0.0/16.
+ // NOTE: currently only one entry is supported.
+ //
+ // +kubebuilder:validation:MaxItems=1
+ // +optional
+ ServiceNetwork []string `json:"serviceNetwork,omitempty"`
+}
+
+// MachineNetworkEntry is a single IP address block for node IP blocks.
+type MachineNetworkEntry struct {
+ // CIDR is the IP block address pool for machines within the cluster.
+ CIDR string `json:"cidr"`
+}
+
+// ClusterNetworkEntry is a single IP address block for pod IP blocks. IP blocks
+// are allocated with size 2^HostSubnetLength.
+type ClusterNetworkEntry struct {
+ // CIDR is the IP block address pool.
+ CIDR string `json:"cidr"`
+
+ // HostPrefix is the prefix size to allocate to each node from the CIDR.
+ // For example, 24 would allocate 2^8=256 addresses to each node. If this
+ // field is not used by the plugin, it can be left unset.
+ // +optional
+ HostPrefix int32 `json:"hostPrefix,omitempty"`
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/platform.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/platform.go new file mode 100644 index 00000000000..cb898ea3e30 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/platform.go @@ -0,0 +1,28 @@ +package agent
+
+// BareMetalPlatform defines agent based install configuration specific to bare metal clusters.
+// Can only be used with spec.installStrategy.agent.
+type BareMetalPlatform struct {
+ // APIVIP is the virtual IP used to reach the OpenShift cluster's API.
+ APIVIP string `json:"apiVIP"`
+
+ // APIVIPDNSName is the domain name used to reach the OpenShift cluster API.
+ // +optional
+ APIVIPDNSName string `json:"apiVIPDNSName,omitempty"`
+
+ // IngressVIP is the virtual IP used for cluster ingress traffic.
+ IngressVIP string `json:"ingressVIP"`
+
+ // VIPDHCPAllocation indicates if virtual IP DHCP allocation mode is enabled.
+ // +optional
+ VIPDHCPAllocation VIPDHCPAllocationType `json:"vipDHCPAllocation"`
+}
+
+// VIPDHCPAllocationType is a valid value for bareMetalPlatform.vipDHCPAllocation.
+// +kubebuilder:validation:Enum="";"Enabled"
+type VIPDHCPAllocationType string
+
+const (
+ Disabled VIPDHCPAllocationType = ""
+ Enabled VIPDHCPAllocationType = "Enabled"
+)
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/status.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/status.go new file mode 100644 index 00000000000..a9b362d9c5d --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/status.go @@ -0,0 +1,20 @@ +package agent
+
+// InstallStrategyStatus defines the observed state of the Agent install strategy for this cluster.
+type InstallStrategyStatus struct {
+
+ // ControlPlaneAgentsDiscovered is the number of Agents currently linked to this ClusterDeployment.
+ // +optional
+ ControlPlaneAgentsDiscovered int `json:"controlPlaneAgentsDiscovered,omitempty"`
+ // ControlPlaneAgentsReady is the number of Agents currently linked to this ClusterDeployment that are ready for use.
+ // +optional
+ ControlPlaneAgentsReady int `json:"controlPlaneAgentsReady,omitempty"`
+ // WorkerAgentsDiscovered is the number of worker Agents currently linked to this ClusterDeployment.
+ // +optional
+ WorkerAgentsDiscovered int `json:"workerAgentsDiscovered,omitempty"`
+ // WorkerAgentsReady is the number of worker Agents currently linked to this ClusterDeployment that are ready for use.
+ // +optional + WorkerAgentsReady int `json:"workerAgentsReady,omitempty"` + + ConnectivityMajorityGroups string `json:"connectivityMajorityGroups,omitempty"` +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/zz_generated.deepcopy.go new file mode 100644 index 00000000000..8f123acb056 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/agent/zz_generated.deepcopy.go @@ -0,0 +1,135 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package agent + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalPlatform) DeepCopyInto(out *BareMetalPlatform) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatform. +func (in *BareMetalPlatform) DeepCopy() *BareMetalPlatform { + if in == nil { + return nil + } + out := new(BareMetalPlatform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallStrategy) DeepCopyInto(out *InstallStrategy) { + *out = *in + in.Networking.DeepCopyInto(&out.Networking) + in.AgentSelector.DeepCopyInto(&out.AgentSelector) + out.ProvisionRequirements = in.ProvisionRequirements + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallStrategy. +func (in *InstallStrategy) DeepCopy() *InstallStrategy { + if in == nil { + return nil + } + out := new(InstallStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallStrategyStatus) DeepCopyInto(out *InstallStrategyStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallStrategyStatus. +func (in *InstallStrategyStatus) DeepCopy() *InstallStrategyStatus { + if in == nil { + return nil + } + out := new(InstallStrategyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineNetworkEntry) DeepCopyInto(out *MachineNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineNetworkEntry. +func (in *MachineNetworkEntry) DeepCopy() *MachineNetworkEntry { + if in == nil { + return nil + } + out := new(MachineNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Networking) DeepCopyInto(out *Networking) {
+ *out = *in
+ if in.MachineNetwork != nil {
+ in, out := &in.MachineNetwork, &out.MachineNetwork
+ *out = make([]MachineNetworkEntry, len(*in))
+ copy(*out, *in)
+ }
+ if in.ClusterNetwork != nil {
+ in, out := &in.ClusterNetwork, &out.ClusterNetwork
+ *out = make([]ClusterNetworkEntry, len(*in))
+ copy(*out, *in)
+ }
+ if in.ServiceNetwork != nil {
+ in, out := &in.ServiceNetwork, &out.ServiceNetwork
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking.
+func (in *Networking) DeepCopy() *Networking {
+ if in == nil {
+ return nil
+ }
+ out := new(Networking)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProvisionRequirements) DeepCopyInto(out *ProvisionRequirements) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionRequirements.
+func (in *ProvisionRequirements) DeepCopy() *ProvisionRequirements {
+ if in == nil {
+ return nil
+ }
+ out := new(ProvisionRequirements)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/aws/doc.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/aws/doc.go new file mode 100644 index 00000000000..fbeefb46c04 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/aws/doc.go @@ -0,0 +1,3 @@ +// Package aws contains API Schema definitions for AWS clusters.
+// +k8s:deepcopy-gen=package,register
+package aws
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/aws/machinepool.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/aws/machinepool.go new file mode 100644 index 00000000000..1d3b7da1fa2 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/aws/machinepool.go @@ -0,0 +1,44 @@ +package aws
+
+// MachinePoolPlatform stores the configuration for a machine pool
+// installed on AWS.
+type MachinePoolPlatform struct {
+ // Zones is list of availability zones that can be used.
+ Zones []string `json:"zones,omitempty"`
+
+ // Subnets is the list of subnets to which to attach the machines.
+ // There must be exactly one private subnet for each availability zone used.
+ // If public subnets are specified, there must be exactly one private and one public subnet specified for each availability zone.
+ Subnets []string `json:"subnets,omitempty"`
+
+ // InstanceType defines the ec2 instance type.
+ // eg. m4.large
+ InstanceType string `json:"type"`
+
+ // EC2RootVolume defines the storage for an ec2 instance.
+ EC2RootVolume `json:"rootVolume"`
+
+ // SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.
+ // +optional
+ SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"`
+}
+
+// SpotMarketOptions defines the options available to a user when configuring
+// Machines to run on Spot instances.
+// Most users should provide an empty struct.
+type SpotMarketOptions struct {
+ // The maximum price the user is willing to pay for their instances
+ // Default: On-Demand price
+ // +optional
+ MaxPrice *string `json:"maxPrice,omitempty"`
+}
+
+// EC2RootVolume defines the storage for an ec2 instance.
+type EC2RootVolume struct {
+ // IOPS defines the iops for the storage.
+ IOPS int `json:"iops"`
+ // Size defines the size of the storage.
+ Size int `json:"size"` + // Type defines the type of the storage. + Type string `json:"type"` +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/aws/platform.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/aws/platform.go new file mode 100644 index 00000000000..6aba49fa6af --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/aws/platform.go @@ -0,0 +1,20 @@ +package aws + +import ( + corev1 "k8s.io/api/core/v1" +) + +// Platform stores all the global configuration that +// all machinesets use. +type Platform struct { + // CredentialsSecretRef refers to a secret that contains the AWS account access + // credentials. + CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"` + + // Region specifies the AWS region where the cluster will be created. + Region string `json:"region"` + + // UserTags specifies additional tags for AWS resources created for the cluster. + // +optional + UserTags map[string]string `json:"userTags,omitempty"` +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/aws/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/aws/zz_generated.deepcopy.go new file mode 100644 index 00000000000..e6ac0880550 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/aws/zz_generated.deepcopy.go @@ -0,0 +1,98 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package aws + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EC2RootVolume) DeepCopyInto(out *EC2RootVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2RootVolume. +func (in *EC2RootVolume) DeepCopy() *EC2RootVolume { + if in == nil { + return nil + } + out := new(EC2RootVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePoolPlatform) DeepCopyInto(out *MachinePoolPlatform) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.EC2RootVolume = in.EC2RootVolume + if in.SpotMarketOptions != nil { + in, out := &in.SpotMarketOptions, &out.SpotMarketOptions + *out = new(SpotMarketOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolPlatform. +func (in *MachinePoolPlatform) DeepCopy() *MachinePoolPlatform { + if in == nil { + return nil + } + out := new(MachinePoolPlatform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef + if in.UserTags != nil { + in, out := &in.UserTags, &out.UserTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. 
+func (in *Platform) DeepCopy() *Platform {
+ if in == nil {
+ return nil
+ }
+ out := new(Platform)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SpotMarketOptions) DeepCopyInto(out *SpotMarketOptions) {
+ *out = *in
+ if in.MaxPrice != nil {
+ in, out := &in.MaxPrice, &out.MaxPrice
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotMarketOptions.
+func (in *SpotMarketOptions) DeepCopy() *SpotMarketOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(SpotMarketOptions)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/doc.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/doc.go
new file mode 100644
index 00000000000..55624305bf4
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/doc.go
@@ -0,0 +1,4 @@
+// Package azure contains API Schema definitions for Azure clusters.
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/hive/pkg/apis/hive
+package azure
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/machinepool.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/machinepool.go
new file mode 100644
index 00000000000..9bb9b7f9695
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/machinepool.go
@@ -0,0 +1,41 @@
+package azure
+
+// MachinePool stores the configuration for a machine pool installed
+// on Azure.
+type MachinePool struct {
+ // Zones is the list of availability zones that can be used.
+ // e.g. ["1", "2", "3"]
+ Zones []string `json:"zones,omitempty"`
+
+ // InstanceType defines the azure instance type.
+ // e.g. Standard_DS2_v2
+ InstanceType string `json:"type"`
+
+ // OSDisk defines the storage for the instance.
+ OSDisk `json:"osDisk"`
+}
+
+// OSDisk defines the disk for machines on Azure.
+type OSDisk struct {
+ // DiskSizeGB defines the size of the disk in GB.
+ DiskSizeGB int32 `json:"diskSizeGB"`
+}
+
+// Set sets the values from `required` to `a`.
+func (a *MachinePool) Set(required *MachinePool) {
+ if required == nil || a == nil {
+ return
+ }
+
+ if len(required.Zones) > 0 {
+ a.Zones = required.Zones
+ }
+
+ if required.InstanceType != "" {
+ a.InstanceType = required.InstanceType
+ }
+
+ if required.OSDisk.DiskSizeGB != 0 {
+ a.OSDisk.DiskSizeGB = required.OSDisk.DiskSizeGB
+ }
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/metadata.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/metadata.go
new file mode 100644
index 00000000000..472c01c4592
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/metadata.go
@@ -0,0 +1,6 @@
+package azure
+
+// Metadata contains Azure metadata (e.g. for uninstalling the cluster).
+type Metadata struct {
+ Region string `json:"region"`
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/platform.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/platform.go
new file mode 100644
index 00000000000..62613c0ba1e
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/platform.go
@@ -0,0 +1,28 @@
+package azure
+
+import (
+ "strings"
+
+ corev1 "k8s.io/api/core/v1"
+)
+
+// Platform stores all the global configuration that all machinesets
+// use.
+type Platform struct { + // CredentialsSecretRef refers to a secret that contains the Azure account access + // credentials. + CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"` + + // Region specifies the Azure region where the cluster will be created. + Region string `json:"region"` + + // BaseDomainResourceGroupName specifies the resource group where the azure DNS zone for the base domain is found + BaseDomainResourceGroupName string `json:"baseDomainResourceGroupName,omitempty"` +} + +//SetBaseDomain parses the baseDomainID and sets the related fields on azure.Platform +func (p *Platform) SetBaseDomain(baseDomainID string) error { + parts := strings.Split(baseDomainID, "/") + p.BaseDomainResourceGroupName = parts[4] + return nil +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/zz_generated.deepcopy.go new file mode 100644 index 00000000000..bcb189e1054 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/azure/zz_generated.deepcopy.go @@ -0,0 +1,76 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package azure + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.OSDisk = in.OSDisk + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSDisk) DeepCopyInto(out *OSDisk) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk. +func (in *OSDisk) DeepCopy() *OSDisk { + if in == nil { + return nil + } + out := new(OSDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/baremetal/doc.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/baremetal/doc.go new file mode 100644 index 00000000000..3e648606c9d --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/baremetal/doc.go @@ -0,0 +1,4 @@ +// Package baremetal contains API Schema definitions for bare metal clusters. 
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/hive/pkg/apis/hive
+package baremetal
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/baremetal/platform.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/baremetal/platform.go
new file mode 100644
index 00000000000..24ffb475d5b
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/baremetal/platform.go
@@ -0,0 +1,11 @@
+package baremetal
+
+import corev1 "k8s.io/api/core/v1"
+
+// Platform stores the global configuration for the cluster.
+type Platform struct {
+ // LibvirtSSHPrivateKeySecretRef is the reference to the secret that contains the private SSH key to use
+ // for access to the libvirt provisioning host.
+ // The SSH private key is expected to be in the secret data under the "ssh-privatekey" key.
+ LibvirtSSHPrivateKeySecretRef corev1.LocalObjectReference `json:"libvirtSSHPrivateKeySecretRef"`
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/baremetal/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/baremetal/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..f2fe0b02285
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/baremetal/zz_generated.deepcopy.go
@@ -0,0 +1,22 @@
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package baremetal
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Platform) DeepCopyInto(out *Platform) {
+ *out = *in
+ out.LibvirtSSHPrivateKeySecretRef = in.LibvirtSSHPrivateKeySecretRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
+func (in *Platform) DeepCopy() *Platform {
+ if in == nil {
+ return nil
+ }
+ out := new(Platform)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/checkpoint_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/checkpoint_types.go
new file mode 100644
index 00000000000..11ba255d1df
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/checkpoint_types.go
@@ -0,0 +1,55 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// CheckpointSpec defines the metadata around the Hive objects' state in the namespace at the time of the last backup.
+type CheckpointSpec struct {
+ // LastBackupChecksum is the checksum of all Hive objects in the namespace at the time of the last backup.
+ LastBackupChecksum string `json:"lastBackupChecksum"`
+
+ // LastBackupTime is the last time we performed a backup of the namespace.
+ LastBackupTime metav1.Time `json:"lastBackupTime"`
+
+ // LastBackupRef is a reference to the last backup object created.
+ LastBackupRef BackupReference `json:"lastBackupRef"`
+}
+
+// BackupReference is a reference to a backup resource
+type BackupReference struct {
+ Name string `json:"name"`
+ Namespace string `json:"namespace"`
+}
+
+// CheckpointStatus defines the observed state of Checkpoint
+type CheckpointStatus struct {
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Checkpoint is the Schema for the backup of Hive objects.
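+// It records the checksum, time, and backup reference of the most recent backup of the Hive objects in its namespace.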
+// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced +type Checkpoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CheckpointSpec `json:"spec,omitempty"` + Status CheckpointStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CheckpointList contains a list of Checkpoint +type CheckpointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Checkpoint `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Checkpoint{}, &CheckpointList{}) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterclaim_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterclaim_types.go new file mode 100644 index 00000000000..04450270b49 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterclaim_types.go @@ -0,0 +1,104 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterClaimSpec defines the desired state of the ClusterClaim. +type ClusterClaimSpec struct { + // ClusterPoolName is the name of the cluster pool from which to claim a cluster. + ClusterPoolName string `json:"clusterPoolName"` + + // Subjects hold references to which to authorize access to the claimed cluster. + // +optional + Subjects []rbacv1.Subject `json:"subjects,omitempty"` + + // Namespace is the namespace containing the ClusterDeployment (name will match the namespace) of the claimed cluster. + // This field will be set as soon as a suitable cluster can be found, however that cluster may still be + // resuming and not yet ready for use. Wait for the ClusterRunning condition to be true to avoid this issue. + // +optional + Namespace string `json:"namespace,omitempty"` + + // Lifetime is the maximum lifetime of the claim after it is assigned a cluster. If the claim still exists + // when the lifetime has elapsed, the claim will be deleted by Hive. + // +optional + Lifetime *metav1.Duration `json:"lifetime,omitempty"` +} + +// ClusterClaimStatus defines the observed state of ClusterClaim. +type ClusterClaimStatus struct { + // Conditions includes more detailed status for the cluster pool. + // +optional + Conditions []ClusterClaimCondition `json:"conditions,omitempty"` + + // Lifetime is the maximum lifetime of the claim after it is assigned a cluster. If the claim still exists + // when the lifetime has elapsed, the claim will be deleted by Hive. + // +optional + Lifetime *metav1.Duration `json:"lifetime,omitempty"` +} + +// ClusterClaimCondition contains details for the current condition of a cluster claim. +type ClusterClaimCondition struct { + // Type is the type of the condition. + Type ClusterClaimConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. 
+ // +optional + Message string `json:"message,omitempty"` +} + +// ClusterClaimConditionType is a valid value for ClusterClaimCondition.Type. +type ClusterClaimConditionType string + +const ( + // ClusterClaimPendingCondition is set when a cluster has not yet been assigned and made ready to the claim. + ClusterClaimPendingCondition ClusterClaimConditionType = "Pending" + // ClusterClaimClusterDeletedCondition is set when the cluster assigned to the claim has been deleted. + ClusterClaimClusterDeletedCondition ClusterClaimConditionType = "ClusterDeleted" + // ClusterRunningCondition is true when a claimed cluster is running and ready for use. + ClusterRunningCondition ClusterClaimConditionType = "ClusterRunning" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterClaim represents a claim to a cluster from a cluster pool. +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=clusterclaims +// +kubebuilder:printcolumn:name="Pool",type="string",JSONPath=".spec.clusterPoolName" +// +kubebuilder:printcolumn:name="Pending",type="string",JSONPath=".status.conditions[?(@.type=='Pending')].reason" +// +kubebuilder:printcolumn:name="ClusterNamespace",type="string",JSONPath=".spec.namespace" +// +kubebuilder:printcolumn:name="ClusterRunning",type="string",JSONPath=".status.conditions[?(@.type=='ClusterRunning')].reason" +type ClusterClaim struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterClaimSpec `json:"spec"` + Status ClusterClaimStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterClaimList contains a list of ClusterClaims. +type ClusterClaimList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterClaim `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterClaim{}, &ClusterClaimList{}) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterdeployment_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterdeployment_types.go new file mode 100644 index 00000000000..c208c9e3633 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterdeployment_types.go @@ -0,0 +1,581 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openshift/hive/pkg/apis/hive/v1/agent" + "github.com/openshift/hive/pkg/apis/hive/v1/aws" + "github.com/openshift/hive/pkg/apis/hive/v1/azure" + "github.com/openshift/hive/pkg/apis/hive/v1/baremetal" + "github.com/openshift/hive/pkg/apis/hive/v1/gcp" + "github.com/openshift/hive/pkg/apis/hive/v1/openstack" + "github.com/openshift/hive/pkg/apis/hive/v1/ovirt" + "github.com/openshift/hive/pkg/apis/hive/v1/vsphere" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +// Important: Run "make" to regenerate code after modifying this file + +const ( + // FinalizerDeprovision is used on ClusterDeployments to ensure we run a successful deprovision + // job before cleaning up the API object. + FinalizerDeprovision string = "hive.openshift.io/deprovision" + + // HiveClusterTypeLabel is an optional label that can be applied to ClusterDeployments. It is + // shown in short output, usable in searching, and adds metrics vectors which can be used to + // alert on cluster types differently. 
+ HiveClusterTypeLabel = "hive.openshift.io/cluster-type" + + // DefaultClusterType will be used when the above HiveClusterTypeLabel is unset. This + // value will not be added as a label, only used for metrics vectors. + DefaultClusterType = "unspecified" + + // HiveInstallLogLabel is used on ConfigMaps uploaded by the install manager which contain an install log. + HiveInstallLogLabel = "hive.openshift.io/install-log" + + // HiveClusterPlatformLabel is a label that is applied to ClusterDeployments + // to denote which platform the cluster was created on. This can be used in + // searching and filtering clusters, as well as in SelectorSyncSets to only + // target specific cloud platforms. + HiveClusterPlatformLabel = "hive.openshift.io/cluster-platform" + + // HiveClusterRegionLabel is a label that is applied to ClusterDeployments + // to denote which region the cluster was created in. This can be used in + // searching and filtering clusters, as well as in SelectorSyncSets to only + // target specific regions of the cluster-platform. + HiveClusterRegionLabel = "hive.openshift.io/cluster-region" +) + +// ClusterPowerState is used to indicate whether a cluster is running or in a +// hibernating state. +// +kubebuilder:validation:Enum="";Running;Hibernating +type ClusterPowerState string + +const ( + // RunningClusterPowerState is the default state of a cluster after it has + // been installed. All of its machines should be running. + RunningClusterPowerState ClusterPowerState = "Running" + + // HibernatingClusterPowerState is used to stop the machines belonging to a cluster + // and move it to a hibernating state. + HibernatingClusterPowerState ClusterPowerState = "Hibernating" +) + +// ClusterDeploymentSpec defines the desired state of ClusterDeployment +type ClusterDeploymentSpec struct { + + // ClusterName is the friendly name of the cluster. It is used for subdomains, + // some resource tagging, and other instances where a friendly name for the + // cluster is useful. + // +required + ClusterName string `json:"clusterName"` + + // BaseDomain is the base domain to which the cluster should belong. + // +required + BaseDomain string `json:"baseDomain"` + + // Platform is the configuration for the specific platform upon which to + // perform the installation. + // +required + Platform Platform `json:"platform"` + + // PullSecretRef is the reference to the secret to use when pulling images. + // +optional + PullSecretRef *corev1.LocalObjectReference `json:"pullSecretRef,omitempty"` + + // PreserveOnDelete allows the user to disconnect a cluster from Hive without deprovisioning it + PreserveOnDelete bool `json:"preserveOnDelete,omitempty"` + + // ControlPlaneConfig contains additional configuration for the target cluster's control plane + // +optional + ControlPlaneConfig ControlPlaneConfigSpec `json:"controlPlaneConfig,omitempty"` + + // Ingress allows defining desired clusteringress/shards to be configured on the cluster. + // +optional + Ingress []ClusterIngress `json:"ingress,omitempty"` + + // CertificateBundles is a list of certificate bundles associated with this cluster + // +optional + CertificateBundles []CertificateBundleSpec `json:"certificateBundles,omitempty"` + + // ManageDNS specifies whether a DNSZone should be created and managed automatically + // for this ClusterDeployment + // +optional + ManageDNS bool `json:"manageDNS,omitempty"` + + // ClusterMetadata contains metadata information about the installed cluster. 
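+ // It includes the cluster ID, infra ID, and references to the admin kubeconfig and admin password secrets.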
+ ClusterMetadata *ClusterMetadata `json:"clusterMetadata,omitempty"` + + // Installed is true if the cluster has been installed + // +optional + Installed bool `json:"installed"` + + // Provisioning contains settings used only for initial cluster provisioning. + // May be unset in the case of adopted clusters. + Provisioning *Provisioning `json:"provisioning,omitempty"` + + // ClusterPoolRef is a reference to the ClusterPool that this ClusterDeployment originated from. + // +optional + ClusterPoolRef *ClusterPoolReference `json:"clusterPoolRef,omitempty"` + + // PowerState indicates whether a cluster should be running or hibernating. When omitted, + // PowerState defaults to the Running state. + // +optional + PowerState ClusterPowerState `json:"powerState,omitempty"` + + // HibernateAfter will transition a cluster to hibernating power state after it has been running for the + // given duration. The time that a cluster has been running is the time since the cluster was installed or the + // time since the cluster last came out of hibernation. + // +optional + HibernateAfter *metav1.Duration `json:"hibernateAfter,omitempty"` + + // InstallAttemptsLimit is the maximum number of times Hive will attempt to install the cluster. + // +optional + InstallAttemptsLimit *int32 `json:"installAttemptsLimit,omitempty"` +} + +// Provisioning contains settings used only for initial cluster provisioning. +type Provisioning struct { + // InstallConfigSecretRef is the reference to a secret that contains an openshift-install + // InstallConfig. This file will be passed through directly to the installer. + // Any version of InstallConfig can be used, provided it can be parsed by the openshift-install + // version for the release you are provisioning. + // +optional + InstallConfigSecretRef *corev1.LocalObjectReference `json:"installConfigSecretRef,omitempty"` + + // ReleaseImage is the image containing metadata for all components that run in the cluster, and + // is the primary and best way to specify what specific version of OpenShift you wish to install. + ReleaseImage string `json:"releaseImage,omitempty"` + + // ImageSetRef is a reference to a ClusterImageSet. If a value is specified for ReleaseImage, + // that will take precedence over the one from the ClusterImageSet. + ImageSetRef *ClusterImageSetReference `json:"imageSetRef,omitempty"` + + // ManifestsConfigMapRef is a reference to user-provided manifests to + // add to or replace manifests that are generated by the installer. + ManifestsConfigMapRef *corev1.LocalObjectReference `json:"manifestsConfigMapRef,omitempty"` + + // SSHPrivateKeySecretRef is the reference to the secret that contains the private SSH key to use + // for access to compute instances. This private key should correspond to the public key included + // in the InstallConfig. The private key is used by Hive to gather logs on the target cluster if + // there are install failures. + // The SSH private key is expected to be in the secret data under the "ssh-privatekey" key. + // +optional + SSHPrivateKeySecretRef *corev1.LocalObjectReference `json:"sshPrivateKeySecretRef,omitempty"` + + // SSHKnownHosts are known hosts to be configured in the hive install manager pod to avoid ssh prompts. + // Use of ssh in the install pod is somewhat limited today (failure log gathering from cluster, some bare metal + // provisioning scenarios), so this setting is often not needed. 
+ SSHKnownHosts []string `json:"sshKnownHosts,omitempty"`
+
+ // InstallerEnv are extra environment variables to pass through to the installer. This may be used to enable
+ // additional features of the installer.
+ // +optional
+ InstallerEnv []corev1.EnvVar `json:"installerEnv,omitempty"`
+
+ // InstallStrategy provides platform-agnostic configuration for the use of alternate install strategies.
+ // Defaults to openshift-install if none specified.
+ // +optional
+ InstallStrategy *InstallStrategy `json:"installStrategy,omitempty"`
+}
+
+// ClusterImageSetReference is a reference to a ClusterImageSet
+type ClusterImageSetReference struct {
+ // Name is the name of the ClusterImageSet that this refers to
+ Name string `json:"name"`
+}
+
+// ClusterPoolReference is a reference to a ClusterPool
+type ClusterPoolReference struct {
+ // Namespace is the namespace where the ClusterPool resides.
+ Namespace string `json:"namespace"`
+ // PoolName is the name of the ClusterPool for which the cluster was created.
+ PoolName string `json:"poolName"`
+ // ClaimName is the name of the ClusterClaim that claimed the cluster from the pool.
+ // +optional
+ ClaimName string `json:"claimName,omitempty"`
+}
+
+// ClusterMetadata contains metadata information about the installed cluster.
+type ClusterMetadata struct {
+
+ // ClusterID is a globally unique identifier for this cluster generated during installation. Used for reporting metrics among other places.
+ ClusterID string `json:"clusterID"`
+
+ // InfraID is an identifier for this cluster generated during installation and used for tagging/naming resources in cloud providers.
+ InfraID string `json:"infraID"`
+
+ // AdminKubeconfigSecretRef references the secret containing the admin kubeconfig for this cluster.
+ AdminKubeconfigSecretRef corev1.LocalObjectReference `json:"adminKubeconfigSecretRef"`
+
+ // AdminPasswordSecretRef references the secret containing the admin username/password which can be used to log in to this cluster.
+ AdminPasswordSecretRef corev1.LocalObjectReference `json:"adminPasswordSecretRef"`
+}
+
+// ClusterDeploymentStatus defines the observed state of ClusterDeployment
+type ClusterDeploymentStatus struct {
+
+ // InstallRestarts is the total count of container restarts on the cluster's install job.
+ InstallRestarts int `json:"installRestarts,omitempty"`
+
+ // APIURL is the URL where the cluster's API can be accessed.
+ APIURL string `json:"apiURL,omitempty"`
+
+ // WebConsoleURL is the URL for the cluster's web console UI.
+ WebConsoleURL string `json:"webConsoleURL,omitempty"`
+
+ // InstallerImage is the name of the installer image to use when installing the target cluster
+ // +optional
+ InstallerImage *string `json:"installerImage,omitempty"`
+
+ // CLIImage is the name of the oc cli image to use when installing the target cluster
+ // +optional
+ CLIImage *string `json:"cliImage,omitempty"`
+
+ // Conditions includes more detailed status for the cluster deployment
+ // +optional
+ Conditions []ClusterDeploymentCondition `json:"conditions,omitempty"`
+
+ // CertificateBundles contains the status of the certificate bundles associated with this cluster deployment.
+ // +optional
+ CertificateBundles []CertificateBundleStatus `json:"certificateBundles,omitempty"`
+
+ // TODO: Use of *Timestamp fields here is slightly off from latest API conventions,
+ // should use InstalledTime instead if we ever get to a V2 of the API.
+ + // InstallStartedTimestamp is the time when all pre-requisites were met and cluster installation was launched. + InstallStartedTimestamp *metav1.Time `json:"installStartedTimestamp,omitempty"` + + // InstalledTimestamp is the time we first detected that the cluster has been successfully installed. + InstalledTimestamp *metav1.Time `json:"installedTimestamp,omitempty"` + + // ProvisionRef is a reference to the last ClusterProvision created for the deployment + // +optional + ProvisionRef *corev1.LocalObjectReference `json:"provisionRef,omitempty"` + + // InstallStrategy contains observed state from specific install strategies. + // +optional + InstallStrategy *InstallStrategyStatus `json:"installStrategy,omitempty"` +} + +// InstallStrategyStatus contains observed state from specific install strategies. +type InstallStrategyStatus struct { + + // Agent defines the observed state of the Agent install strategy for this cluster. + // +optional + Agent *agent.InstallStrategyStatus `json:"agent,omitempty"` +} + +// ClusterDeploymentCondition contains details for the current condition of a cluster deployment +type ClusterDeploymentCondition struct { + // Type is the type of the condition. + Type ClusterDeploymentConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// ClusterDeploymentConditionType is a valid value for ClusterDeploymentCondition.Type +type ClusterDeploymentConditionType string + +// WARNING: All ClusterDeploymentConditionTypes should be added to the AllClusterDeploymentConditions slice below. +const ( + // ClusterImageSetNotFoundCondition is set when the ClusterImageSet referenced by the + // ClusterDeployment is not found. + ClusterImageSetNotFoundCondition ClusterDeploymentConditionType = "ClusterImageSetNotFound" + + // InstallerImageResolutionFailedCondition is a condition that indicates whether the job + // to determine the installer image based on a release image was successful. + InstallerImageResolutionFailedCondition ClusterDeploymentConditionType = "InstallerImageResolutionFailed" + + // ControlPlaneCertificateNotFoundCondition is set when a control plane certificate bundle + // is not available, preventing the target cluster's control plane from being configured with + // certificates. + ControlPlaneCertificateNotFoundCondition ClusterDeploymentConditionType = "ControlPlaneCertificateNotFound" + + // IngressCertificateNotFoundCondition is a condition indicating that one of the CertificateBundle + // secrets required by an Ingress is not available. + IngressCertificateNotFoundCondition ClusterDeploymentConditionType = "IngressCertificateNotFound" + + // UnreachableCondition indicates that Hive is unable to establish an API connection to the remote cluster. 
+ UnreachableCondition ClusterDeploymentConditionType = "Unreachable"
+
+ // ActiveAPIURLOverrideCondition indicates that Hive is communicating with the remote cluster using the
+ // API URL override.
+ ActiveAPIURLOverrideCondition ClusterDeploymentConditionType = "ActiveAPIURLOverride"
+
+ // DNSNotReadyCondition indicates that the DNSZone object created for the clusterDeployment
+ // (ie manageDNS==true) has not yet indicated that the DNS zone is successfully responding to queries.
+ DNSNotReadyCondition ClusterDeploymentConditionType = "DNSNotReady"
+
+ // InstallImagesNotResolvedCondition indicates that the install images for the clusterDeployment
+ // have not yet been resolved. This usually includes the installer and OpenShift cli images.
+ InstallImagesNotResolvedCondition ClusterDeploymentConditionType = "InstallImagesNotResolved"
+
+ // ProvisionFailedCondition indicates that a provision failed
+ ProvisionFailedCondition ClusterDeploymentConditionType = "ProvisionFailed"
+
+ // SyncSetFailedCondition indicates if any syncset for a cluster deployment failed
+ SyncSetFailedCondition ClusterDeploymentConditionType = "SyncSetFailed"
+
+ // RelocationFailedCondition indicates if a relocation to another Hive instance has failed
+ RelocationFailedCondition ClusterDeploymentConditionType = "RelocationFailed"
+
+ // ClusterHibernatingCondition is set when the ClusterDeployment is either
+ // transitioning to/from a hibernating state or is in a hibernating state.
+ ClusterHibernatingCondition ClusterDeploymentConditionType = "Hibernating"
+
+ // InstallLaunchErrorCondition is set when a cluster provision fails to launch an install pod
+ InstallLaunchErrorCondition ClusterDeploymentConditionType = "InstallLaunchError"
+
+ // DeprovisionLaunchErrorCondition is set when a cluster deprovision fails to launch.
+ DeprovisionLaunchErrorCondition ClusterDeploymentConditionType = "DeprovisionLaunchError"
+
+ // ProvisionStoppedCondition is set when cluster provisioning is stopped
+ ProvisionStoppedCondition ClusterDeploymentConditionType = "ProvisionStopped"
+
+ // AuthenticationFailureClusterDeploymentCondition is true when platform credentials cannot be used because of authentication failure
+ AuthenticationFailureClusterDeploymentCondition ClusterDeploymentConditionType = "AuthenticationFailure"
+)
+
+// AllClusterDeploymentConditions is a slice containing all condition types. This can be used for dealing with
+// cluster deployment conditions dynamically.
+var AllClusterDeploymentConditions = []ClusterDeploymentConditionType{
+ ClusterImageSetNotFoundCondition,
+ InstallerImageResolutionFailedCondition,
+ ControlPlaneCertificateNotFoundCondition,
+ IngressCertificateNotFoundCondition,
+ UnreachableCondition,
+ ActiveAPIURLOverrideCondition,
+ DNSNotReadyCondition,
+ ProvisionFailedCondition,
+ SyncSetFailedCondition,
+ RelocationFailedCondition,
+ ClusterHibernatingCondition,
+ InstallLaunchErrorCondition,
+}
+
+// Cluster hibernating reasons
+const (
+ // ResumingHibernationReason is used as the reason when the cluster is transitioning
+ // from a Hibernating state to a Running state.
+ ResumingHibernationReason = "Resuming"
+ // RunningHibernationReason is used as the reason when the cluster is running and
+ // the Hibernating condition is false.
+ RunningHibernationReason = "Running"
+ // StoppingHibernationReason is used as the reason when the cluster is transitioning
+ // from a Running state to a Hibernating state.
+ StoppingHibernationReason = "Stopping"
+ // HibernatingHibernationReason is used as the reason when the cluster is in a
+ // Hibernating state.
+ HibernatingHibernationReason = "Hibernating"
+ // UnsupportedHibernationReason is used as the reason when the cluster spec
+ // specifies that the cluster be moved to a Hibernating state, but either the cluster
+ // version is not compatible with hibernation (< 4.4.8) or the cloud provider of
+ // the cluster is not supported.
+ UnsupportedHibernationReason = "Unsupported"
+ // FailedToStopHibernationReason is used when there was an error stopping machines
+ // to enter hibernation
+ FailedToStopHibernationReason = "FailedToStop"
+ // FailedToStartHibernationReason is used when there was an error starting machines
+ // to leave hibernation
+ FailedToStartHibernationReason = "FailedToStart"
+ // SyncSetsNotAppliedReason is used as the reason when SyncSets have not yet been applied
+ // for the cluster based on ClusterSync.Status.FirstSuccessTime
+ SyncSetsNotAppliedReason = "SyncSetsNotApplied"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterDeployment is the Schema for the clusterdeployments API
+// +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Platform",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/cluster-platform"
+// +kubebuilder:printcolumn:name="Region",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/cluster-region"
+// +kubebuilder:printcolumn:name="ClusterType",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/cluster-type"
+// +kubebuilder:printcolumn:name="Installed",type="boolean",JSONPath=".spec.installed"
+// +kubebuilder:printcolumn:name="InfraID",type="string",JSONPath=".spec.clusterMetadata.infraID"
+// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/version-major-minor-patch"
+// +kubebuilder:printcolumn:name="PowerState",type="string",JSONPath=".status.conditions[?(@.type=='Hibernating')].reason"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:path=clusterdeployments,shortName=cd,scope=Namespaced
+type ClusterDeployment struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ClusterDeploymentSpec `json:"spec,omitempty"`
+ Status ClusterDeploymentStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterDeploymentList contains a list of ClusterDeployment
+type ClusterDeploymentList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ClusterDeployment `json:"items"`
+}
+
+// Platform is the configuration for the specific platform upon which to perform
+// the installation. Only one of the platform configurations should be set.
+type Platform struct {
+ // AWS is the configuration used when installing on AWS.
+ AWS *aws.Platform `json:"aws,omitempty"`
+
+ // Azure is the configuration used when installing on Azure.
+ // +optional
+ Azure *azure.Platform `json:"azure,omitempty"`
+
+ // BareMetal is the configuration used when installing on bare metal.
+ BareMetal *baremetal.Platform `json:"baremetal,omitempty"`
+
+ // GCP is the configuration used when installing on Google Cloud Platform.
+ // +optional + GCP *gcp.Platform `json:"gcp,omitempty"` + + // OpenStack is the configuration used when installing on OpenStack + OpenStack *openstack.Platform `json:"openstack,omitempty"` + + // VSphere is the configuration used when installing on vSphere + VSphere *vsphere.Platform `json:"vsphere,omitempty"` + + // Ovirt is the configuration used when installing on oVirt + Ovirt *ovirt.Platform `json:"ovirt,omitempty"` + + // AgentBareMetal is the configuration used when performing an Assisted Agent based installation + // to bare metal. Can only be used with the Assisted InstallStrategy. + AgentBareMetal *agent.BareMetalPlatform `json:"agentBareMetal,omitempty"` +} + +// InstallStrategy provides configuration for optional alternative install strategies. +type InstallStrategy struct { + + // Agent is the install strategy configuration for provisioning a cluster with the + // Agent based assisted installer. + Agent *agent.InstallStrategy `json:"agent,omitempty"` +} + +// ClusterIngress contains the configurable pieces for any ClusterIngress objects +// that should exist on the cluster. +type ClusterIngress struct { + // Name of the ClusterIngress object to create. + // +required + Name string `json:"name"` + + // Domain (sometimes referred to as shard) is the full DNS suffix that the resulting + // IngressController object will service (eg abcd.mycluster.mydomain.com). + // +required + Domain string `json:"domain"` + + // NamespaceSelector allows filtering the list of namespaces serviced by the + // ingress controller. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` + + // RouteSelector allows filtering the set of Routes serviced by the ingress controller + // +optional + RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"` + + // ServingCertificate references a CertificateBundle in the ClusterDeployment.Spec that + // should be used for this Ingress + // +optional + ServingCertificate string `json:"servingCertificate,omitempty"` +} + +// ControlPlaneConfigSpec contains additional configuration settings for a target +// cluster's control plane. +type ControlPlaneConfigSpec struct { + // ServingCertificates specifies serving certificates for the control plane + // +optional + ServingCertificates ControlPlaneServingCertificateSpec `json:"servingCertificates,omitempty"` + + // APIURLOverride is the optional URL override to which Hive will transition for communication with the API + // server of the remote cluster. When a remote cluster is created, Hive will initially communicate using the + // API URL established during installation. If an API URL Override is specified, Hive will periodically attempt + // to connect to the remote cluster using the override URL. Once Hive has determined that the override URL is + // active, Hive will use the override URL for further communications with the API server of the remote cluster. + // +optional + APIURLOverride string `json:"apiURLOverride,omitempty"` +} + +// ControlPlaneServingCertificateSpec specifies serving certificate settings for +// the control plane of the target cluster. +type ControlPlaneServingCertificateSpec struct { + // Default references the name of a CertificateBundle in the ClusterDeployment that should be + // used for the control plane's default endpoint. + // +optional + Default string `json:"default,omitempty"` + + // Additional is a list of additional domains and certificates that are also associated with + // the control plane's api endpoint. 
+ // +optional + Additional []ControlPlaneAdditionalCertificate `json:"additional,omitempty"` +} + +// ControlPlaneAdditionalCertificate defines an additional serving certificate for a control plane +type ControlPlaneAdditionalCertificate struct { + // Name references a CertificateBundle in the ClusterDeployment.Spec that should be + // used for this additional certificate. + Name string `json:"name"` + + // Domain is the domain of the additional control plane certificate + Domain string `json:"domain"` +} + +// CertificateBundleSpec specifies a certificate bundle associated with a cluster deployment +type CertificateBundleSpec struct { + // Name is an identifier that must be unique within the bundle and must be referenced by + // an ingress or by the control plane serving certs + // +required + Name string `json:"name"` + + // Generate indicates whether this bundle should have real certificates generated for it. + // +optional + Generate bool `json:"generate,omitempty"` + + // CertificateSecretRef is the reference to the secret that contains the certificate bundle. If + // the certificate bundle is to be generated, it will be generated with the name in this + // reference. Otherwise, it is expected that the secret should exist in the same namespace + // as the ClusterDeployment + CertificateSecretRef corev1.LocalObjectReference `json:"certificateSecretRef"` +} + +// CertificateBundleStatus specifies whether a certificate bundle was generated for this +// cluster deployment. +type CertificateBundleStatus struct { + // Name of the certificate bundle + Name string `json:"name"` + + // Generated indicates whether the certificate bundle was generated + Generated bool `json:"generated"` +} + +// RelocateStatus is the status of a cluster relocate. +// This is used in the value of the "hive.openshift.io/relocate" annotation. +type RelocateStatus string + +const ( + // RelocateOutgoing indicates that a resource is on the source side of an in-progress relocate + RelocateOutgoing RelocateStatus = "outgoing" + // RelocateComplete indicates that a resource is on the source side of a completed relocate + RelocateComplete RelocateStatus = "complete" + // RelocateIncoming indicates that a resource is on the destination side of an in-progress relocate + RelocateIncoming RelocateStatus = "incoming" +) + +func init() { + SchemeBuilder.Register(&ClusterDeployment{}, &ClusterDeploymentList{}) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterdeprovision_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterdeprovision_types.go new file mode 100644 index 00000000000..69bdbf46da1 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterdeprovision_types.go @@ -0,0 +1,164 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterDeprovisionSpec defines the desired state of ClusterDeprovision +type ClusterDeprovisionSpec struct { + // InfraID is the identifier generated during installation for a cluster. It is used for tagging/naming resources in cloud providers. + InfraID string `json:"infraID"` + + // ClusterID is a globally unique identifier for the cluster to deprovision. It will be used if specified. 
+ ClusterID string `json:"clusterID,omitempty"`
+
+ // Platform contains platform-specific configuration for a ClusterDeprovision
+ Platform ClusterDeprovisionPlatform `json:"platform,omitempty"`
+}
+
+// ClusterDeprovisionStatus defines the observed state of ClusterDeprovision
+type ClusterDeprovisionStatus struct {
+ // Completed is true when the uninstall has completed successfully
+ Completed bool `json:"completed,omitempty"`
+
+ // Conditions includes more detailed status for the cluster deprovision
+ // +optional
+ Conditions []ClusterDeprovisionCondition `json:"conditions,omitempty"`
+}
+
+// ClusterDeprovisionPlatform contains platform-specific configuration for the
+// deprovision
+type ClusterDeprovisionPlatform struct {
+ // AWS contains AWS-specific deprovision settings
+ AWS *AWSClusterDeprovision `json:"aws,omitempty"`
+ // Azure contains Azure-specific deprovision settings
+ Azure *AzureClusterDeprovision `json:"azure,omitempty"`
+ // GCP contains GCP-specific deprovision settings
+ GCP *GCPClusterDeprovision `json:"gcp,omitempty"`
+ // OpenStack contains OpenStack-specific deprovision settings
+ OpenStack *OpenStackClusterDeprovision `json:"openstack,omitempty"`
+ // VSphere contains VMware vSphere-specific deprovision settings
+ VSphere *VSphereClusterDeprovision `json:"vsphere,omitempty"`
+ // Ovirt contains oVirt-specific deprovision settings
+ Ovirt *OvirtClusterDeprovision `json:"ovirt,omitempty"`
+}
+
+// AWSClusterDeprovision contains AWS-specific configuration for a ClusterDeprovision
+type AWSClusterDeprovision struct {
+ // Region is the AWS region for this deprovisioning
+ Region string `json:"region"`
+
+ // CredentialsSecretRef is the AWS account credentials to use for deprovisioning the cluster
+ CredentialsSecretRef *corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"`
+}
+
+// AzureClusterDeprovision contains Azure-specific configuration for a ClusterDeprovision
+type AzureClusterDeprovision struct {
+ // CredentialsSecretRef is the Azure account credentials to use for deprovisioning the cluster
+ CredentialsSecretRef *corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"`
+}
+
+// GCPClusterDeprovision contains GCP-specific configuration for a ClusterDeprovision
+type GCPClusterDeprovision struct {
+ // Region is the GCP region for this deprovision
+ Region string `json:"region"`
+ // CredentialsSecretRef is the GCP account credentials to use for deprovisioning the cluster
+ CredentialsSecretRef *corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"`
+}
+
+// OpenStackClusterDeprovision contains OpenStack-specific configuration for a ClusterDeprovision
+type OpenStackClusterDeprovision struct {
+ // Cloud is the section in the clouds.yaml secret below to use for auth/connectivity.
+ Cloud string `json:"cloud"`
+ // CredentialsSecretRef is the OpenStack account credentials to use for deprovisioning the cluster
+ CredentialsSecretRef *corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"`
+ // CertificatesSecretRef refers to a secret that contains CA certificates
+ // necessary for communicating with OpenStack.
+ //
+ // +optional
+ CertificatesSecretRef *corev1.LocalObjectReference `json:"certificatesSecretRef,omitempty"`
+}
+
+// VSphereClusterDeprovision contains VMware vSphere-specific configuration for a ClusterDeprovision
+type VSphereClusterDeprovision struct {
+ // CredentialsSecretRef is the vSphere account credentials to use for deprovisioning the cluster
+ CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
+ // CertificatesSecretRef refers to a secret that contains the vSphere CA certificates
+ // necessary for communicating with the vCenter.
+ CertificatesSecretRef corev1.LocalObjectReference `json:"certificatesSecretRef"`
+ // VCenter is the vSphere vCenter hostname.
+ VCenter string `json:"vCenter"`
+}
+
+// OvirtClusterDeprovision contains oVirt-specific configuration for a ClusterDeprovision
+type OvirtClusterDeprovision struct {
+ // The oVirt cluster ID
+ ClusterID string `json:"clusterID"`
+ // CredentialsSecretRef is the oVirt account credentials to use for deprovisioning the cluster
+ // secret fields: ovirt_url, ovirt_username, ovirt_password, ovirt_ca_bundle
+ CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
+ // CertificatesSecretRef refers to a secret that contains the oVirt CA certificates
+ // necessary for communicating with oVirt.
+ CertificatesSecretRef corev1.LocalObjectReference `json:"certificatesSecretRef"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterDeprovision is the Schema for the clusterdeprovisions API
+// +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="InfraID",type="string",JSONPath=".spec.infraID"
+// +kubebuilder:printcolumn:name="ClusterID",type="string",JSONPath=".spec.clusterID"
+// +kubebuilder:printcolumn:name="Completed",type="boolean",JSONPath=".status.completed"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:path=clusterdeprovisions,shortName=cdr,scope=Namespaced
+type ClusterDeprovision struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ClusterDeprovisionSpec `json:"spec,omitempty"`
+ Status ClusterDeprovisionStatus `json:"status,omitempty"`
+}
+
+// ClusterDeprovisionCondition contains details for the current condition of a ClusterDeprovision
+type ClusterDeprovisionCondition struct {
+ // Type is the type of the condition.
+ Type ClusterDeprovisionConditionType `json:"type"`
+ // Status is the status of the condition.
+ Status corev1.ConditionStatus `json:"status"`
+ // LastProbeTime is the last time we probed the condition.
+ // +optional
+ LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
+ // LastTransitionTime is the last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+ // Reason is a unique, one-word, CamelCase reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+ // Message is a human-readable message indicating details about last transition.
+ // +optional + Message string `json:"message,omitempty"` +} + +// ClusterDeprovisionConditionType is a valid value for ClusterDeprovisionCondition.Type +type ClusterDeprovisionConditionType string + +const ( + // AuthenticationFailureClusterDeprovisionCondition is true when credentials cannot be used because of authentication failure + AuthenticationFailureClusterDeprovisionCondition ClusterDeprovisionConditionType = "AuthenticationFailure" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeprovisionList contains a list of ClusterDeprovision +type ClusterDeprovisionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterDeprovision `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterDeprovision{}, &ClusterDeprovisionList{}) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterimageset_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterimageset_types.go new file mode 100644 index 00000000000..04bc275cfe6 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterimageset_types.go @@ -0,0 +1,46 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterImageSetSpec defines the desired state of ClusterImageSet +type ClusterImageSetSpec struct { + // ReleaseImage is the image that contains the payload to use when installing + // a cluster. + ReleaseImage string `json:"releaseImage"` +} + +// ClusterImageSetStatus defines the observed state of ClusterImageSet +type ClusterImageSetStatus struct{} + +// +genclient:nonNamespaced +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterImageSet is the Schema for the clusterimagesets API +// +k8s:openapi-gen=true +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Release",type="string",JSONPath=".spec.releaseImage" +// +kubebuilder:resource:path=clusterimagesets,shortName=imgset,scope=Cluster +type ClusterImageSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterImageSetSpec `json:"spec,omitempty"` + Status ClusterImageSetStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterImageSetList contains a list of ClusterImageSet +type ClusterImageSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterImageSet `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterImageSet{}, &ClusterImageSetList{}) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterpool_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterpool_types.go new file mode 100644 index 00000000000..e9cb2972bc1 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterpool_types.go @@ -0,0 +1,161 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterPoolSpec defines the desired state of the ClusterPool. +type ClusterPoolSpec struct { + + // Platform encompasses the desired platform for the cluster. + // +required + Platform Platform `json:"platform"` + + // PullSecretRef is the reference to the secret to use when pulling images. 
+ // +optional
+ PullSecretRef *corev1.LocalObjectReference `json:"pullSecretRef,omitempty"`
+
+ // Size is the default number of clusters that we should keep provisioned and waiting for use.
+ // +kubebuilder:validation:Minimum=0
+ // +required
+ Size int32 `json:"size"`
+
+ // MaxSize is the maximum number of clusters that will be provisioned including clusters that have been claimed
+ // and ones waiting to be used.
+ // By default there is no limit.
+ // +optional
+ MaxSize *int32 `json:"maxSize,omitempty"`
+
+ // MaxConcurrent is the maximum number of clusters that will be provisioned or deprovisioned at a time. This includes the
+ // claimed clusters being deprovisioned.
+ // By default there is no limit.
+ // +optional
+ MaxConcurrent *int32 `json:"maxConcurrent,omitempty"`
+
+ // BaseDomain is the base domain to use for all clusters created in this pool.
+ // +required
+ BaseDomain string `json:"baseDomain"`
+
+ // ImageSetRef is a reference to a ClusterImageSet. The release image specified in the ClusterImageSet will be used
+ // by clusters created for this cluster pool.
+ ImageSetRef ClusterImageSetReference `json:"imageSetRef"`
+
+ // Labels to be applied to new ClusterDeployments created for the pool. ClusterDeployments that have already been
+ // claimed will not be affected when this value is modified.
+ // +optional
+ Labels map[string]string `json:"labels,omitempty"`
+
+ // InstallConfigSecretTemplateRef is a reference to a secret with the key install-config.yaml containing the
+ // install-config.yaml content to be used as a template for all clusters in this pool.
+ // Cluster specific settings (name, base domain) will be injected dynamically when the ClusterDeployment install-config Secret is generated.
+ // +optional
+ InstallConfigSecretTemplateRef *corev1.LocalObjectReference `json:"installConfigSecretTemplateRef,omitempty"`
+
+ // HibernateAfter will be applied to new ClusterDeployments created for the pool. HibernateAfter will transition
+ // clusters in the clusterpool to hibernating power state after they have been running for the given duration. The time
+ // that a cluster has been running is the time since the cluster was installed or the time since the cluster last came
+ // out of hibernation.
+ // +optional
+ HibernateAfter *metav1.Duration `json:"hibernateAfter,omitempty"`
+
+ // SkipMachinePools allows creating clusterpools where the machinepools are not managed by hive after cluster creation
+ // +optional
+ SkipMachinePools bool `json:"skipMachinePools,omitempty"`
+
+ // ClaimLifetime defines the lifetimes for claims for the cluster pool.
+ // +optional
+ ClaimLifetime *ClusterPoolClaimLifetime `json:"claimLifetime,omitempty"`
+}
+
+// ClusterPoolClaimLifetime defines the lifetimes for claims for the cluster pool.
+type ClusterPoolClaimLifetime struct {
+ // Default is the default lifetime of the claim when no lifetime is set on the claim itself.
+ // +optional
+ Default *metav1.Duration `json:"default,omitempty"`
+
+ // Maximum is the maximum lifetime of the claim after it is assigned a cluster. If the claim still exists
+ // when the lifetime has elapsed, the claim will be deleted by Hive.
+ // The lifetime of a claim is the minimum of the lifetimes set by the cluster pool and the claim itself.
+ // +optional
+ Maximum *metav1.Duration `json:"maximum,omitempty"`
+}
+
+// ClusterPoolStatus defines the observed state of ClusterPool
+type ClusterPoolStatus struct {
+ // Size is the number of unclaimed clusters that have been created for the pool.
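+ // This includes clusters that are still installing; Ready below counts only those that are installed and claimable.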
+ Size int32 `json:"size"` + + // Ready is the number of unclaimed clusters that have been installed and are ready to be claimed. + Ready int32 `json:"ready"` + + // Conditions includes more detailed status for the cluster pool + // +optional + Conditions []ClusterPoolCondition `json:"conditions,omitempty"` +} + +// ClusterPoolCondition contains details for the current condition of a cluster pool +type ClusterPoolCondition struct { + // Type is the type of the condition. + Type ClusterPoolConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// ClusterPoolConditionType is a valid value for ClusterPoolCondition.Type +type ClusterPoolConditionType string + +const ( + // ClusterPoolMissingDependenciesCondition is set when a cluster pool is missing dependencies required to create a + // cluster. Dependencies include resources such as the ClusterImageSet and the credentials Secret. + ClusterPoolMissingDependenciesCondition ClusterPoolConditionType = "MissingDependencies" + // ClusterPoolCapacityAvailableCondition is set to provide information on whether the cluster pool has capacity + // available to create more clusters for the pool. + ClusterPoolCapacityAvailableCondition ClusterPoolConditionType = "CapacityAvailable" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterPool represents a pool of clusters that should be kept ready to be given out to users. Clusters are removed +// from the pool once claimed and then automatically replaced with a new one. 
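Putting the ClusterPoolSpec fields together, a hypothetical pool that keeps three clusters warm could look like the following sketch. The platform contents are elided, it assumes ClusterImageSetReference carries a Name field, and all values are invented:

```go
package example

import (
	hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func newPool() *hivev1.ClusterPool {
	maxSize := int32(6)
	return &hivev1.ClusterPool{
		ObjectMeta: metav1.ObjectMeta{Name: "dev-pool", Namespace: "hive"},
		Spec: hivev1.ClusterPoolSpec{
			Platform:    hivev1.Platform{}, // cloud credentials/region would be set here
			Size:        3,                 // keep 3 unclaimed clusters ready
			MaxSize:     &maxSize,          // never exceed 6 clusters total
			BaseDomain:  "pools.example.com",
			ImageSetRef: hivev1.ClusterImageSetReference{Name: "openshift-v4.6.17"},
		},
	}
}
```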
+// +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
+// +kubebuilder:subresource:scale:specpath=.spec.size,statuspath=.status.size
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready"
+// +kubebuilder:printcolumn:name="Size",type="string",JSONPath=".spec.size"
+// +kubebuilder:printcolumn:name="BaseDomain",type="string",JSONPath=".spec.baseDomain"
+// +kubebuilder:printcolumn:name="ImageSet",type="string",JSONPath=".spec.imageSetRef.name"
+// +kubebuilder:resource:path=clusterpools,shortName=cp
+type ClusterPool struct {
+    metav1.TypeMeta `json:",inline"`
+    metav1.ObjectMeta `json:"metadata,omitempty"`
+
+    Spec ClusterPoolSpec `json:"spec"`
+    Status ClusterPoolStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterPoolList contains a list of ClusterPools
+type ClusterPoolList struct {
+    metav1.TypeMeta `json:",inline"`
+    metav1.ListMeta `json:"metadata,omitempty"`
+    Items []ClusterPool `json:"items"`
+}
+
+func init() {
+    SchemeBuilder.Register(&ClusterPool{}, &ClusterPoolList{})
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterprovision_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterprovision_types.go
new file mode 100644
index 00000000000..d0a9ea7f000
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterprovision_types.go
@@ -0,0 +1,142 @@
+package v1
+
+import (
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// ClusterProvisionSpec defines the results of provisioning a cluster.
+type ClusterProvisionSpec struct {
+
+    // ClusterDeploymentRef references the cluster deployment provisioned.
+    ClusterDeploymentRef corev1.LocalObjectReference `json:"clusterDeploymentRef"`
+
+    // PodSpec is the spec to use for the installer pod.
+    PodSpec corev1.PodSpec `json:"podSpec"`
+
+    // Attempt is the attempt number of the cluster deployment that this ClusterProvision represents.
+    Attempt int `json:"attempt"`
+
+    // Stage is the stage of provisioning that the cluster deployment has reached.
+    Stage ClusterProvisionStage `json:"stage"`
+
+    // ClusterID is a globally unique identifier for this cluster generated during installation. Used for reporting metrics among other places.
+    ClusterID *string `json:"clusterID,omitempty"`
+
+    // InfraID is an identifier for this cluster generated during installation and used for tagging/naming resources in cloud providers.
+    InfraID *string `json:"infraID,omitempty"`
+
+    // InstallLog is the log from the installer.
+    InstallLog *string `json:"installLog,omitempty"`
+
+    // Metadata is the metadata.json generated by the installer, providing metadata information about the cluster created.
+    Metadata *runtime.RawExtension `json:"metadata,omitempty"`
+
+    // AdminKubeconfigSecretRef references the secret containing the admin kubeconfig for this cluster.
+    AdminKubeconfigSecretRef *corev1.LocalObjectReference `json:"adminKubeconfigSecretRef,omitempty"`
+
+    // AdminPasswordSecretRef references the secret containing the admin username/password which can be used to log in to this cluster.
+    AdminPasswordSecretRef *corev1.LocalObjectReference `json:"adminPasswordSecretRef,omitempty"`
+
+    // PrevClusterID is the cluster ID of the previous failed provision attempt.
+    PrevClusterID *string `json:"prevClusterID,omitempty"`
+
+    // PrevInfraID is the infra ID of the previous failed provision attempt.
+ PrevInfraID *string `json:"prevInfraID,omitempty"` +} + +// ClusterProvisionStatus defines the observed state of ClusterProvision. +type ClusterProvisionStatus struct { + // JobRef is the reference to the job performing the provision. + JobRef *corev1.LocalObjectReference `json:"jobRef,omitempty"` + + // Conditions includes more detailed status for the cluster provision + // +optional + Conditions []ClusterProvisionCondition `json:"conditions,omitempty"` +} + +// ClusterProvisionStage is the stage of provisioning. +type ClusterProvisionStage string + +const ( + // ClusterProvisionStageInitializing indicates that pre-provision initialization is underway. + ClusterProvisionStageInitializing ClusterProvisionStage = "initializing" + // ClusterProvisionStageProvisioning indicates that the cluster provision is ongoing. + ClusterProvisionStageProvisioning ClusterProvisionStage = "provisioning" + // ClusterProvisionStageComplete indicates that the cluster provision completed successfully. + ClusterProvisionStageComplete ClusterProvisionStage = "complete" + // ClusterProvisionStageFailed indicates that the cluster provision failed. + ClusterProvisionStageFailed ClusterProvisionStage = "failed" +) + +// ClusterProvisionCondition contains details for the current condition of a cluster provision +type ClusterProvisionCondition struct { + // Type is the type of the condition. + Type ClusterProvisionConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// ClusterProvisionConditionType is a valid value for ClusterProvisionCondition.Type +type ClusterProvisionConditionType string + +const ( + // ClusterProvisionInitializedCondition is set when a cluster provision has finished initialization. + ClusterProvisionInitializedCondition ClusterProvisionConditionType = "ClusterProvisionInitialized" + + // ClusterProvisionCompletedCondition is set when a cluster provision completes. + ClusterProvisionCompletedCondition ClusterProvisionConditionType = "ClusterProvisionCompleted" + + // ClusterProvisionFailedCondition is set when a cluster provision fails. + ClusterProvisionFailedCondition ClusterProvisionConditionType = "ClusterProvisionFailed" + + // ClusterProvisionJobCreated is set when the install job is created for a cluster provision. 
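The four stages defined above form a simple linear lifecycle, with complete and failed as the terminal states; a caller watching a ClusterProvision might encode that as in this hypothetical helper:

```go
package example

import hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"

// provisionFinished reports whether a ClusterProvision has reached a terminal
// stage. Illustrative helper only; not part of this package.
func provisionFinished(p *hivev1.ClusterProvision) bool {
	switch p.Spec.Stage {
	case hivev1.ClusterProvisionStageComplete, hivev1.ClusterProvisionStageFailed:
		return true
	default: // "initializing" and "provisioning" are still in flight
		return false
	}
}
```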
+ ClusterProvisionJobCreated ClusterProvisionConditionType = "ClusterProvisionJobCreated" + + // InstallPodStuckCondition is set when the install pod is stuck + InstallPodStuckCondition ClusterProvisionConditionType = "InstallPodStuck" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterProvision is the Schema for the clusterprovisions API +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="ClusterDeployment",type="string",JSONPath=".spec.clusterDeploymentRef.name" +// +kubebuilder:printcolumn:name="Stage",type="string",JSONPath=".spec.stage" +// +kubebuilder:printcolumn:name="InfraID",type="string",JSONPath=".spec.infraID" +// +kubebuilder:resource:path=clusterprovisions,scope=Namespaced +type ClusterProvision struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterProvisionSpec `json:"spec,omitempty"` + Status ClusterProvisionStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterProvisionList contains a list of ClusterProvision +type ClusterProvisionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterProvision `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterProvision{}, &ClusterProvisionList{}) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterrelocate_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterrelocate_types.go new file mode 100644 index 00000000000..ccc3a564894 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterrelocate_types.go @@ -0,0 +1,56 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterRelocateSpec defines the relocation of clusters from one Hive instance to another. +type ClusterRelocateSpec struct { + // KubeconfigSecretRef is a reference to the secret containing the kubeconfig for the destination Hive instance. + // The kubeconfig must be in a data field where the key is "kubeconfig". + KubeconfigSecretRef KubeconfigSecretReference `json:"kubeconfigSecretRef"` + + // ClusterDeploymentSelector is a LabelSelector indicating which clusters will be relocated. + ClusterDeploymentSelector metav1.LabelSelector `json:"clusterDeploymentSelector"` +} + +// KubeconfigSecretReference is a reference to a secret containing the kubeconfig for a remote cluster. +type KubeconfigSecretReference struct { + // Name is the name of the secret. + Name string `json:"name"` + // Namespace is the namespace where the secret lives. + Namespace string `json:"namespace"` +} + +// ClusterRelocateStatus defines the observed state of ClusterRelocate. 
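Tying the two ClusterRelocateSpec fields together, a hypothetical relocation of every ClusterDeployment labeled env=dev to another Hive instance might be declared like this sketch (secret name, namespace, and labels are invented):

```go
package example

import (
	hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newRelocate points matching ClusterDeployments at the destination Hive
// instance whose kubeconfig lives in the referenced secret.
func newRelocate() *hivev1.ClusterRelocate {
	return &hivev1.ClusterRelocate{
		ObjectMeta: metav1.ObjectMeta{Name: "move-dev-clusters"},
		Spec: hivev1.ClusterRelocateSpec{
			KubeconfigSecretRef: hivev1.KubeconfigSecretReference{
				Name:      "dest-hive-kubeconfig", // must hold a "kubeconfig" data key
				Namespace: "hive",
			},
			ClusterDeploymentSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{"env": "dev"},
			},
		},
	}
}
```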
+type ClusterRelocateStatus struct{} + +// +genclient:nonNamespaced +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRelocate is the Schema for the ClusterRelocates API +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Selector",type="string",JSONPath=".spec.clusterDeploymentSelector" +// +kubebuilder:resource:path=clusterrelocates +type ClusterRelocate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterRelocateSpec `json:"spec,omitempty"` + Status ClusterRelocateStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRelocateList contains a list of ClusterRelocate +type ClusterRelocateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterRelocate `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterRelocate{}, &ClusterRelocateList{}) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterstate_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterstate_types.go new file mode 100644 index 00000000000..b2ccb69bc6b --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/clusterstate_types.go @@ -0,0 +1,59 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + configv1 "github.com/openshift/api/config/v1" +) + +// ClusterStateSpec defines the desired state of ClusterState +type ClusterStateSpec struct { +} + +// ClusterStateStatus defines the observed state of ClusterState +type ClusterStateStatus struct { + // LastUpdated is the last time that operator state was updated + LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` + + // ClusterOperators contains the state for every cluster operator in the + // target cluster + ClusterOperators []ClusterOperatorState `json:"clusterOperators,omitempty"` +} + +// ClusterOperatorState summarizes the status of a single cluster operator +type ClusterOperatorState struct { + // Name is the name of the cluster operator + Name string `json:"name"` + + // Conditions is the set of conditions in the status of the cluster operator + // on the target cluster + Conditions []configv1.ClusterOperatorStatusCondition `json:"conditions,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterState is the Schema for the clusterstates API +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced +type ClusterState struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterStateSpec `json:"spec,omitempty"` + Status ClusterStateStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterStateList contains a list of ClusterState +type ClusterStateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterState `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterState{}, &ClusterStateList{}) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/dnszone_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/dnszone_types.go new file mode 100644 index 00000000000..6d0378a7fef --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/dnszone_types.go @@ -0,0 +1,203 @@ +package v1 + +import ( + corev1 
"k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // FinalizerDNSZone is used on DNSZones to ensure we successfully deprovision + // the cloud objects before cleaning up the API object. + FinalizerDNSZone string = "hive.openshift.io/dnszone" + + // FinalizerDNSEndpoint is used on DNSZones to ensure we successfully + // delete the parent-link records before cleaning up the API object. + FinalizerDNSEndpoint string = "hive.openshift.io/dnsendpoint" +) + +// DNSZoneSpec defines the desired state of DNSZone +type DNSZoneSpec struct { + // Zone is the DNS zone to host + Zone string `json:"zone"` + + // LinkToParentDomain specifies whether DNS records should + // be automatically created to link this DNSZone with a + // parent domain. + // +optional + LinkToParentDomain bool `json:"linkToParentDomain,omitempty"` + + // AWS specifies AWS-specific cloud configuration + // +optional + AWS *AWSDNSZoneSpec `json:"aws,omitempty"` + + // GCP specifies GCP-specific cloud configuration + // +optional + GCP *GCPDNSZoneSpec `json:"gcp,omitempty"` + + // Azure specifes Azure-specific cloud configuration + // +optional + Azure *AzureDNSZoneSpec `json:"azure,omitempty"` +} + +// AWSDNSZoneSpec contains AWS-specific DNSZone specifications +type AWSDNSZoneSpec struct { + // CredentialsSecretRef contains a reference to a secret that contains AWS credentials + // for CRUD operations + CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"` + + // AdditionalTags is a set of additional tags to set on the DNS hosted zone. In addition + // to these tags,the DNS Zone controller will set a hive.openhsift.io/hostedzone tag + // identifying the HostedZone record that it belongs to. + AdditionalTags []AWSResourceTag `json:"additionalTags,omitempty"` + + // Region is the AWS region to use for route53 operations. + // This defaults to us-east-1. + // For AWS China, use cn-northwest-1. + // +optional + Region string `json:"region,omitempty"` +} + +// AWSResourceTag represents a tag that is applied to an AWS cloud resource +type AWSResourceTag struct { + // Key is the key for the tag + Key string `json:"key"` + // Value is the value for the tag + Value string `json:"value"` +} + +// GCPDNSZoneSpec contains GCP-specific DNSZone specifications +type GCPDNSZoneSpec struct { + // CredentialsSecretRef references a secret that will be used to authenticate with + // GCP CloudDNS. It will need permission to create and manage CloudDNS Hosted Zones. + // Secret should have a key named 'osServiceAccount.json'. + // The credentials must specify the project to use. + CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"` +} + +// AzureDNSZoneSpec contains Azure-specific DNSZone specifications +type AzureDNSZoneSpec struct { + // CredentialsSecretRef references a secret that will be used to authenticate with + // Azure CloudDNS. It will need permission to create and manage CloudDNS Hosted Zones. + // Secret should have a key named 'osServicePrincipal.json'. + // The credentials must specify the project to use. + CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"` + + // ResourceGroupName specifies the Azure resource group in which the Hosted Zone should be created. + ResourceGroupName string `json:"resourceGroupName"` +} + +// DNSZoneStatus defines the observed state of DNSZone +type DNSZoneStatus struct { + // LastSyncTimestamp is the time that the zone was last sync'd. 
+ // +optional + LastSyncTimestamp *metav1.Time `json:"lastSyncTimestamp,omitempty"` + + // LastSyncGeneration is the generation of the zone resource that was last sync'd. This is used to know + // if the Object has changed and we should sync immediately. + // +optional + LastSyncGeneration int64 `json:"lastSyncGeneration,omitempty"` + + // NameServers is a list of nameservers for this DNS zone + // +optional + NameServers []string `json:"nameServers,omitempty"` + + // AWSDNSZoneStatus contains status information specific to AWS + // +optional + AWS *AWSDNSZoneStatus `json:"aws,omitempty"` + + // GCPDNSZoneStatus contains status information specific to GCP + // +optional + GCP *GCPDNSZoneStatus `json:"gcp,omitempty"` + + // AzureDNSZoneStatus contains status information specific to Azure + Azure *AzureDNSZoneStatus `json:"azure,omitempty"` + + // Conditions includes more detailed status for the DNSZone + // +optional + Conditions []DNSZoneCondition `json:"conditions,omitempty"` +} + +// AWSDNSZoneStatus contains status information specific to AWS DNS zones +type AWSDNSZoneStatus struct { + // ZoneID is the ID of the zone in AWS + // +optional + ZoneID *string `json:"zoneID,omitempty"` +} + +// AzureDNSZoneStatus contains status information specific to Azure DNS zones +type AzureDNSZoneStatus struct { +} + +// GCPDNSZoneStatus contains status information specific to GCP Cloud DNS zones +type GCPDNSZoneStatus struct { + // ZoneName is the name of the zone in GCP Cloud DNS + // +optional + ZoneName *string `json:"zoneName,omitempty"` +} + +// DNSZoneCondition contains details for the current condition of a DNSZone +type DNSZoneCondition struct { + // Type is the type of the condition. + Type DNSZoneConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. 
+ // +optional + Message string `json:"message,omitempty"` +} + +// DNSZoneConditionType is a valid value for DNSZoneCondition.Type +type DNSZoneConditionType string + +const ( + // ZoneAvailableDNSZoneCondition is true if the DNSZone is responding to DNS queries + ZoneAvailableDNSZoneCondition DNSZoneConditionType = "ZoneAvailable" + // ParentLinkCreatedCondition is true if the parent link has been created + ParentLinkCreatedCondition DNSZoneConditionType = "ParentLinkCreated" + // DomainNotManaged is true if we try to reconcile a DNSZone and the HiveConfig + // does not contain a ManagedDNS entry for the domain in the DNSZone + DomainNotManaged DNSZoneConditionType = "DomainNotManaged" + // InsufficientCredentialsCondition is true when credentials cannot be used to create a + // DNS zone because of insufficient permissions + InsufficientCredentialsCondition DNSZoneConditionType = "InsufficientCredentials" + // AuthenticationFailureCondition is true when credentials cannot be used to create a + // DNS zone because they fail authentication + AuthenticationFailureCondition DNSZoneConditionType = "AuthenticationFailure" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DNSZone is the Schema for the dnszones API +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced +type DNSZone struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DNSZoneSpec `json:"spec,omitempty"` + Status DNSZoneStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DNSZoneList contains a list of DNSZone +type DNSZoneList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DNSZone `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DNSZone{}, &DNSZoneList{}) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/doc.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/doc.go new file mode 100644 index 00000000000..717f59d8b61 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/doc.go @@ -0,0 +1,7 @@ +// Package v1 contains API Schema definitions for the hive v1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/hive/pkg/apis/hive +// +k8s:defaulter-gen=TypeMeta +// +groupName=hive.openshift.io +package v1 diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/clouduid.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/clouduid.go new file mode 100644 index 00000000000..7f9a99cd6e1 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/clouduid.go @@ -0,0 +1,13 @@ +package gcp + +import ( + "crypto/md5" + "fmt" +) + +// CloudControllerUID generates a UID used by the GCP cloud controller provider +// to generate certain load balancing resources +func CloudControllerUID(infraID string) string { + hash := md5.Sum([]byte(infraID)) + return fmt.Sprintf("%x", hash)[:16] +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/doc.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/doc.go new file mode 100644 index 00000000000..9b5d26c20c1 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/doc.go @@ -0,0 +1,4 @@ +// Package gcp contains API Schema definitions for GCP clusters. 
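Usage of CloudControllerUID above is straightforward: it is the first 16 hex characters of the MD5 of the infra ID, matching how the GCP cloud provider names its load-balancing resources. In this sketch the infra ID is made up:

```go
package main

import (
	"fmt"

	"github.com/openshift/hive/pkg/apis/hive/v1/gcp"
)

func main() {
	// Deterministic: the same infra ID always yields the same UID.
	uid := gcp.CloudControllerUID("mycluster-x7f2k") // infra ID is illustrative
	fmt.Println(uid)                                 // prints 16 hex characters
}
```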
+// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/hive/pkg/apis/hive +package gcp diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/machinepools.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/machinepools.go new file mode 100644 index 00000000000..bce33ed330d --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/machinepools.go @@ -0,0 +1,26 @@ +package gcp + +// MachinePool stores the configuration for a machine pool installed on GCP. +type MachinePool struct { + // Zones is list of availability zones that can be used. + Zones []string `json:"zones,omitempty"` + + // InstanceType defines the GCP instance type. + // eg. n1-standard-4 + InstanceType string `json:"type"` +} + +// Set sets the values from `required` to `a`. +func (a *MachinePool) Set(required *MachinePool) { + if required == nil || a == nil { + return + } + + if len(required.Zones) > 0 { + a.Zones = required.Zones + } + + if required.InstanceType != "" { + a.InstanceType = required.InstanceType + } +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/metadata.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/metadata.go new file mode 100644 index 00000000000..fcdc59e6a2d --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/metadata.go @@ -0,0 +1,7 @@ +package gcp + +// Metadata contains GCP metadata (e.g. for uninstalling the cluster). +type Metadata struct { + Region string `json:"region"` + ProjectID string `json:"projectID"` +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/platform.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/platform.go new file mode 100644 index 00000000000..252caff3ce8 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/platform.go @@ -0,0 +1,16 @@ +package gcp + +import ( + corev1 "k8s.io/api/core/v1" +) + +// Platform stores all the global configuration that all machinesets +// use. +type Platform struct { + // CredentialsSecretRef refers to a secret that contains the GCP account access + // credentials. + CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"` + + // Region specifies the GCP region where the cluster will be created. + Region string `json:"region"` +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/zz_generated.deepcopy.go new file mode 100644 index 00000000000..81690f05bb7 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/gcp/zz_generated.deepcopy.go @@ -0,0 +1,59 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package gcp + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
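The Set method on the GCP MachinePool above implements a sparse overlay: only the non-empty fields of the argument are copied onto the receiver. A short illustration with invented values:

```go
package example

import "github.com/openshift/hive/pkg/apis/hive/v1/gcp"

func exampleSet() gcp.MachinePool {
	pool := gcp.MachinePool{InstanceType: "n1-standard-4"}
	override := gcp.MachinePool{Zones: []string{"us-east1-b", "us-east1-c"}}

	// Set copies only the non-empty fields of the argument, so InstanceType
	// survives while Zones is overridden.
	pool.Set(&override)
	return pool // {Zones: [us-east1-b us-east1-c], InstanceType: n1-standard-4}
}
```

The same copy-only-if-set pattern reappears in the openstack MachinePool.Set further below.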
+func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/hiveconfig_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/hiveconfig_types.go new file mode 100644 index 00000000000..062d3b637b8 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/hiveconfig_types.go @@ -0,0 +1,400 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // FeatureGateAgentInstallStrategy enables the use of the alpha ClusterDeployment agent based + // install strategy and platforms. + FeatureGateAgentInstallStrategy = "AlphaAgentInstallStrategy" +) + +// HiveConfigSpec defines the desired state of Hive +type HiveConfigSpec struct { + + // TargetNamespace is the namespace where the core Hive components should be run. Defaults to "hive". Will be + // created if it does not already exist. All resource references in HiveConfig can be assumed to be in the + // TargetNamespace. + // +optional + TargetNamespace string `json:"targetNamespace,omitempty"` + + // ManagedDomains is the list of DNS domains that are managed by the Hive cluster + // When specifying 'manageDNS: true' in a ClusterDeployment, the ClusterDeployment's + // baseDomain should be a direct child of one of these domains, otherwise the + // ClusterDeployment creation will result in a validation error. + // +optional + ManagedDomains []ManageDNSConfig `json:"managedDomains,omitempty"` + + // AdditionalCertificateAuthoritiesSecretRef is a list of references to secrets in the + // TargetNamespace that contain an additional Certificate Authority to use when communicating + // with target clusters. These certificate authorities will be used in addition to any self-signed + // CA generated by each cluster on installation. + // +optional + AdditionalCertificateAuthoritiesSecretRef []corev1.LocalObjectReference `json:"additionalCertificateAuthoritiesSecretRef,omitempty"` + + // GlobalPullSecretRef is used to specify a pull secret that will be used globally by all of the cluster deployments. + // For each cluster deployment, the contents of GlobalPullSecret will be merged with the specific pull secret for + // a cluster deployment(if specified), with precedence given to the contents of the pull secret for the cluster deployment. + // The global pull secret is assumed to be in the TargetNamespace. + // +optional + GlobalPullSecretRef *corev1.LocalObjectReference `json:"globalPullSecretRef,omitempty"` + + // Backup specifies configuration for backup integration. + // If absent, backup integration will be disabled. + // +optional + Backup BackupConfig `json:"backup,omitempty"` + + // FailedProvisionConfig is used to configure settings related to handling provision failures. 
+    // +optional
+    FailedProvisionConfig FailedProvisionConfig `json:"failedProvisionConfig,omitempty"`
+
+    // LogLevel is the level of logging to use for the Hive controllers.
+    // Acceptable levels, from coarsest to finest, are panic, fatal, error, warn, info, debug, and trace.
+    // The default level is info.
+    // +optional
+    LogLevel string `json:"logLevel,omitempty"`
+
+    // SyncSetReapplyInterval is a string duration indicating how much time must pass before SyncSet resources
+    // will be reapplied.
+    // The default reapply interval is two hours.
+    SyncSetReapplyInterval string `json:"syncSetReapplyInterval,omitempty"`
+
+    // MaintenanceMode can be set to true to disable the hive controllers in situations where we need to ensure
+    // nothing is running that will add or act upon finalizers on Hive types. This should rarely be needed.
+    // Sets replicas to 0 for the hive-controllers deployment to accomplish this.
+    MaintenanceMode *bool `json:"maintenanceMode,omitempty"`
+
+    // DeprovisionsDisabled can be set to true to block deprovision jobs from running.
+    DeprovisionsDisabled *bool `json:"deprovisionsDisabled,omitempty"`
+
+    // DeleteProtection can be set to "enabled" to turn on automatic delete protection for ClusterDeployments. When
+    // enabled, Hive will add the "hive.openshift.io/protected-delete" annotation to new ClusterDeployments. Once a
+    // ClusterDeployment has been installed, a user must remove the annotation from a ClusterDeployment prior to
+    // deleting it.
+    // +kubebuilder:validation:Enum=enabled
+    // +optional
+    DeleteProtection DeleteProtectionType `json:"deleteProtection,omitempty"`
+
+    // DisabledControllers allows selectively disabling Hive controllers by name.
+    // The name of an individual controller matches the name of the controller as seen in the Hive logging output.
+    DisabledControllers []string `json:"disabledControllers,omitempty"`
+
+    // ControllersConfig is used to configure different hive controllers
+    // +optional
+    ControllersConfig *ControllersConfig `json:"controllersConfig,omitempty"`
+
+    FeatureGates *FeatureGateSelection `json:"featureGates,omitempty"`
+}
+
+// FeatureSet defines the set of feature gates that should be used.
+// +kubebuilder:validation:Enum="";Custom
+type FeatureSet string
+
+var (
+    // DefaultFeatureSet is the default set of features supported as part of the normal platform.
+    DefaultFeatureSet FeatureSet = ""
+
+    // CustomFeatureSet allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED.
+    // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid
+    // combinations, it might leave objects in a state that is unrecoverable.
+    CustomFeatureSet FeatureSet = "Custom"
+)
+
+// FeatureGateSelection allows selecting feature gates for the controller.
+type FeatureGateSelection struct {
+    // featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting.
+    // +unionDiscriminator
+    // +optional
+    FeatureSet FeatureSet `json:"featureSet,omitempty"`
+
+    // custom allows the enabling or disabling of any feature.
+    // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid
+    // combinations, it might cause unknown behavior. featureSet must equal "Custom" to use this field.
+    // +optional
+    // +nullable
+    Custom *FeatureGatesEnabled `json:"custom,omitempty"`
+}
+
+// FeatureGatesEnabled is a list of feature gates that must be enabled.
+type FeatureGatesEnabled struct {
+    // enabled is a list of all feature gates that you want to force on
+    // +optional
+    Enabled []string `json:"enabled,omitempty"`
+}
+
+// FeatureSets contains a map of feature set names to the features they enable.
+var FeatureSets = map[FeatureSet]*FeatureGatesEnabled{
+    DefaultFeatureSet: {
+        Enabled: []string{},
+    },
+    CustomFeatureSet: {
+        Enabled: []string{},
+    },
+}
+
+// HiveConfigStatus defines the observed state of Hive
+type HiveConfigStatus struct {
+    // AggregatorClientCAHash keeps an md5 hash of the aggregator client CA
+    // configmap data from the openshift-config-managed namespace. When the configmap changes,
+    // admission is redeployed.
+    AggregatorClientCAHash string `json:"aggregatorClientCAHash,omitempty"`
+
+    // ObservedGeneration will record the most recently processed HiveConfig object's generation.
+    ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+    // ConfigApplied will be set by the hive operator to indicate whether or not the LastGenerationObserved
+    // was successfully reconciled.
+    ConfigApplied bool `json:"configApplied,omitempty"`
+}
+
+// BackupConfig contains settings for the Velero backup integration.
+type BackupConfig struct {
+    // Velero specifies configuration for the Velero backup integration.
+    // +optional
+    Velero VeleroBackupConfig `json:"velero,omitempty"`
+
+    // MinBackupPeriodSeconds specifies that at least MinBackupPeriodSeconds will elapse between each backup.
+    // This is used to rate limit backups. This potentially batches together multiple changes into one backup.
+    // No backups will be lost as changes that happen during this interval are queued up and will result in a
+    // backup happening once the interval has been completed.
+    // +optional
+    MinBackupPeriodSeconds *int `json:"minBackupPeriodSeconds,omitempty"`
+}
+
+// VeleroBackupConfig contains settings for the Velero backup integration.
+type VeleroBackupConfig struct {
+    // Enabled dictates if Velero backup integration is enabled.
+    // If not specified, the default is disabled.
+    // +optional
+    Enabled bool `json:"enabled,omitempty"`
+
+    // Namespace specifies in which namespace velero backup objects should be created.
+    // If not specified, the default is a namespace named "velero".
+    // +optional
+    Namespace string `json:"namespace,omitempty"`
+}
+
+// FailedProvisionConfig contains settings to control behavior undertaken by Hive when an installation attempt fails.
+type FailedProvisionConfig struct {
+
+    // TODO: Figure out how to mark SkipGatherLogs as deprecated (more than just a comment)
+
+    // DEPRECATED: This flag is no longer respected and will be removed in the future.
+    SkipGatherLogs bool `json:"skipGatherLogs,omitempty"`
+    AWS *FailedProvisionAWSConfig `json:"aws,omitempty"`
+}
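Stepping back to the feature-gate types above, opting a HiveConfig into the alpha agent install strategy would look roughly like this sketch (only the FeatureGates fragment is shown; the gate name comes from the FeatureGateAgentInstallStrategy constant defined earlier in this file):

```go
package example

import hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"

// featureGates opts in to the alpha agent install strategy. FeatureSet must be
// "Custom" for the Custom field to take effect, per the doc comment above.
func featureGates() *hivev1.FeatureGateSelection {
	return &hivev1.FeatureGateSelection{
		FeatureSet: hivev1.CustomFeatureSet,
		Custom: &hivev1.FeatureGatesEnabled{
			Enabled: []string{hivev1.FeatureGateAgentInstallStrategy},
		},
	}
}
```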
+
+// ManageDNSConfig contains the domain being managed, and the cloud-specific
+// details for accessing/managing the domain.
+type ManageDNSConfig struct {
+
+    // Domains is the list of domains that hive will be managing entries for with the provided credentials.
+    Domains []string `json:"domains"`
+
+    // AWS contains AWS-specific settings for external DNS
+    // +optional
+    AWS *ManageDNSAWSConfig `json:"aws,omitempty"`
+
+    // GCP contains GCP-specific settings for external DNS
+    // +optional
+    GCP *ManageDNSGCPConfig `json:"gcp,omitempty"`
+
+    // Azure contains Azure-specific settings for external DNS
+    // +optional
+    Azure *ManageDNSAzureConfig `json:"azure,omitempty"`
+
+    // As other cloud providers are supported, additional fields will be
+    // added for each of those cloud providers. Only a single cloud provider
+    // may be configured at a time.
+}
+
+// FailedProvisionAWSConfig contains AWS-specific info to upload log files.
+type FailedProvisionAWSConfig struct {
+    // CredentialsSecretRef references a secret in the TargetNamespace that will be used to authenticate with
+    // AWS S3. It will need permission to upload logs to S3.
+    // Secret should have keys named aws_access_key_id and aws_secret_access_key that contain the AWS credentials.
+    // Example Secret:
+    //   data:
+    //     aws_access_key_id: minio
+    //     aws_secret_access_key: minio123
+    CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
+
+    // Region is the AWS region to use for S3 operations.
+    // This defaults to us-east-1.
+    // For AWS China, use cn-northwest-1.
+    // +optional
+    Region string `json:"region,omitempty"`
+
+    // ServiceEndpoint is the url to connect to an S3 compatible provider.
+    ServiceEndpoint string `json:"serviceEndpoint,omitempty"`
+
+    // Bucket is the S3 bucket to store the logs in.
+    Bucket string `json:"bucket,omitempty"`
+}
+
+// ManageDNSAWSConfig contains AWS-specific info to manage a given domain.
+type ManageDNSAWSConfig struct {
+    // CredentialsSecretRef references a secret in the TargetNamespace that will be used to authenticate with
+    // AWS Route53. It will need permission to manage entries for the domain
+    // listed in the parent ManageDNSConfig object.
+    // Secret should have AWS keys named 'aws_access_key_id' and 'aws_secret_access_key'.
+    CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
+
+    // Region is the AWS region to use for route53 operations.
+    // This defaults to us-east-1.
+    // For AWS China, use cn-northwest-1.
+    // +optional
+    Region string `json:"region,omitempty"`
+}
+
+// ManageDNSGCPConfig contains GCP-specific info to manage a given domain.
+type ManageDNSGCPConfig struct {
+    // CredentialsSecretRef references a secret in the TargetNamespace that will be used to authenticate with
+    // GCP DNS. It will need permission to manage entries in each of the managed
+    // domains listed in the parent ManageDNSConfig object.
+    // Secret should have a key named 'osServiceAccount.json'.
+    // The credentials must specify the project to use.
+    CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
+}
+
+type DeleteProtectionType string
+
+const (
+    DeleteProtectionEnabled DeleteProtectionType = "enabled"
+)
+
+// ManageDNSAzureConfig contains Azure-specific info to manage a given domain
+type ManageDNSAzureConfig struct {
+    // CredentialsSecretRef references a secret in the TargetNamespace that will be used to authenticate with
+    // Azure DNS. It will need permission to manage entries in each of the
+    // managed domains listed in the parent ManageDNSConfig object.
+ // Secret should have a key named 'osServicePrincipal.json' + CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"` + + // ResourceGroupName specifies the Azure resource group containing the DNS zones + // for the domains being managed. + ResourceGroupName string `json:"resourceGroupName"` +} + +// ControllerConfig contains the configuration for a controller +type ControllerConfig struct { + // ConcurrentReconciles specifies number of concurrent reconciles for a controller + // +optional + ConcurrentReconciles *int32 `json:"concurrentReconciles,omitempty"` + // ClientQPS specifies client rate limiter QPS for a controller + // +optional + ClientQPS *int32 `json:"clientQPS,omitempty"` + // ClientBurst specifies client rate limiter burst for a controller + // +optional + ClientBurst *int32 `json:"clientBurst,omitempty"` + // QueueQPS specifies workqueue rate limiter QPS for a controller + // +optional + QueueQPS *int32 `json:"queueQPS,omitempty"` + // QueueBurst specifies workqueue rate limiter burst for a controller + // +optional + QueueBurst *int32 `json:"queueBurst,omitempty"` + // Replicas specifies the number of replicas the specific controller pod should use. + // This is ONLY for controllers that have been split out into their own pods. + // This is ignored for all others. + Replicas *int32 `json:"replicas,omitempty"` +} + +// +kubebuilder:validation:Enum=clusterDeployment;clusterrelocate;clusterstate;clusterversion;controlPlaneCerts;dnsendpoint;dnszone;remoteingress;remotemachineset;syncidentityprovider;unreachable;velerobackup;clusterprovision;clusterDeprovision;clusterpool;clusterpoolnamespace;hibernation;clusterclaim;metrics;clustersync +type ControllerName string + +func (controllerName ControllerName) String() string { + return string(controllerName) +} + +// ControllerNames is a slice of controller names +type ControllerNames []ControllerName + +// Contains says whether or not the controller name is in the slice of controller names. 
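A sketch of how Contains might be used to check whether a given controller has been disabled; the controller choice is illustrative, and note that HiveConfigSpec.DisabledControllers itself is a plain []string, so conversion to ControllerNames is up to the caller:

```go
package example

import hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"

// skipReconcile shows the intended use of Contains: a controller can check
// whether it appears in a disabled-controllers list. Illustrative only.
func skipReconcile(disabled hivev1.ControllerNames) bool {
	return disabled.Contains(hivev1.ClustersyncControllerName)
}
```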
+func (c ControllerNames) Contains(controllerName ControllerName) bool { + for _, curControllerName := range c { + if curControllerName == controllerName { + return true + } + } + + return false +} + +// WARNING: All the controller names below should also be added to the kubebuilder validation of the type ControllerName +const ( + ClusterClaimControllerName ControllerName = "clusterclaim" + ClusterDeploymentControllerName ControllerName = "clusterDeployment" + ClusterDeprovisionControllerName ControllerName = "clusterDeprovision" + ClusterpoolControllerName ControllerName = "clusterpool" + ClusterpoolNamespaceControllerName ControllerName = "clusterpoolnamespace" + ClusterProvisionControllerName ControllerName = "clusterProvision" + ClusterRelocateControllerName ControllerName = "clusterRelocate" + ClusterStateControllerName ControllerName = "clusterState" + ClusterVersionControllerName ControllerName = "clusterversion" + ControlPlaneCertsControllerName ControllerName = "controlPlaneCerts" + DNSEndpointControllerName ControllerName = "dnsendpoint" + DNSZoneControllerName ControllerName = "dnszone" + HibernationControllerName ControllerName = "hibernation" + RemoteIngressControllerName ControllerName = "remoteingress" + RemoteMachinesetControllerName ControllerName = "remotemachineset" + SyncIdentityProviderControllerName ControllerName = "syncidentityprovider" + UnreachableControllerName ControllerName = "unreachable" + VeleroBackupControllerName ControllerName = "velerobackup" + MetricsControllerName ControllerName = "metrics" + ClustersyncControllerName ControllerName = "clustersync" +) + +// SpecificControllerConfig contains the configuration for a specific controller +type SpecificControllerConfig struct { + // Name specifies the name of the controller + Name ControllerName `json:"name"` + // ControllerConfig contains the configuration for the controller specified by Name field + Config ControllerConfig `json:"config"` +} + +// ControllersConfig contains default as well as controller specific configurations +type ControllersConfig struct { + // Default specifies default configuration for all the controllers, can be used to override following coded defaults + // default for concurrent reconciles is 5 + // default for client qps is 5 + // default for client burst is 10 + // default for queue qps is 10 + // default for queue burst is 100 + // +optional + Default *ControllerConfig `json:"default,omitempty"` + // Controllers contains a list of configurations for different controllers + // +optional + Controllers []SpecificControllerConfig `json:"controllers,omitempty"` +} + +// +genclient:nonNamespaced +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HiveConfig is the Schema for the hives API +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +type HiveConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HiveConfigSpec `json:"spec,omitempty"` + Status HiveConfigStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HiveConfigList contains a list of Hive +type HiveConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HiveConfig `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HiveConfig{}, &HiveConfigList{}) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/machinepool_types.go 
b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/machinepool_types.go new file mode 100644 index 00000000000..2d6361c8531 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/machinepool_types.go @@ -0,0 +1,184 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openshift/hive/pkg/apis/hive/v1/aws" + "github.com/openshift/hive/pkg/apis/hive/v1/azure" + "github.com/openshift/hive/pkg/apis/hive/v1/gcp" + "github.com/openshift/hive/pkg/apis/hive/v1/openstack" + "github.com/openshift/hive/pkg/apis/hive/v1/ovirt" + "github.com/openshift/hive/pkg/apis/hive/v1/vsphere" +) + +const ( + // MachinePoolImageIDOverrideAnnotation can be applied to MachinePools to control the precise image ID to be used + // for the MachineSets we reconcile for this pool. This feature is presently only implemented for AWS, and + // is intended for very limited use cases we do not recommend pursuing regularly. As such it is not currently + // part of our official API. + MachinePoolImageIDOverrideAnnotation = "hive.openshift.io/image-id-override" +) + +// MachinePoolSpec defines the desired state of MachinePool +type MachinePoolSpec struct { + + // ClusterDeploymentRef references the cluster deployment to which this + // machine pool belongs. + ClusterDeploymentRef corev1.LocalObjectReference `json:"clusterDeploymentRef"` + + // Name is the name of the machine pool. + Name string `json:"name"` + + // Replicas is the count of machines for this machine pool. + // Replicas and autoscaling cannot be used together. + // Default is 1, if autoscaling is not used. + // +optional + Replicas *int64 `json:"replicas,omitempty"` + + // Autoscaling is the details for auto-scaling the machine pool. + // Replicas and autoscaling cannot be used together. + // +optional + Autoscaling *MachinePoolAutoscaling `json:"autoscaling,omitempty"` + + // Platform is configuration for machine pool specific to the platform. + Platform MachinePoolPlatform `json:"platform"` + + // Map of label string keys and values that will be applied to the created MachineSet's + // MachineSpec. This list will overwrite any modifications made to Node labels on an + // ongoing basis. + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // List of taints that will be applied to the created MachineSet's MachineSpec. + // This list will overwrite any modifications made to Node taints on an ongoing basis. + // +optional + Taints []corev1.Taint `json:"taints,omitempty"` +} + +// MachinePoolAutoscaling details how the machine pool is to be auto-scaled. +type MachinePoolAutoscaling struct { + // MinReplicas is the minimum number of replicas for the machine pool. + MinReplicas int32 `json:"minReplicas"` + + // MaxReplicas is the maximum number of replicas for the machine pool. + MaxReplicas int32 `json:"maxReplicas"` +} + +// MachinePoolPlatform is the platform-specific configuration for a machine +// pool. Only one of the platforms should be set. +type MachinePoolPlatform struct { + // AWS is the configuration used when installing on AWS. + AWS *aws.MachinePoolPlatform `json:"aws,omitempty"` + // Azure is the configuration used when installing on Azure. + Azure *azure.MachinePool `json:"azure,omitempty"` + // GCP is the configuration used when installing on GCP. + GCP *gcp.MachinePool `json:"gcp,omitempty"` + // OpenStack is the configuration used when installing on OpenStack. 
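Combining the spec fields above, a hypothetical autoscaled GCP worker pool might look like this sketch. Replicas is left unset because, as documented, Replicas and Autoscaling are mutually exclusive; all names are invented:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
	"github.com/openshift/hive/pkg/apis/hive/v1/gcp"
)

// newWorkerPool autoscales a GCP worker pool between 3 and 9 replicas.
func newWorkerPool() *hivev1.MachinePool {
	return &hivev1.MachinePool{
		ObjectMeta: metav1.ObjectMeta{Name: "mycluster-worker", Namespace: "hive"},
		Spec: hivev1.MachinePoolSpec{
			ClusterDeploymentRef: corev1.LocalObjectReference{Name: "mycluster"},
			Name:                 "worker",
			Autoscaling: &hivev1.MachinePoolAutoscaling{
				MinReplicas: 3,
				MaxReplicas: 9,
			},
			Platform: hivev1.MachinePoolPlatform{
				GCP: &gcp.MachinePool{InstanceType: "n1-standard-4"},
			},
		},
	}
}
```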
+ OpenStack *openstack.MachinePool `json:"openstack,omitempty"` + // VSphere is the configuration used when installing on vSphere + VSphere *vsphere.MachinePool `json:"vsphere,omitempty"` + // Ovirt is the configuration used when installing on oVirt. + Ovirt *ovirt.MachinePool `json:"ovirt,omitempty"` +} + +// MachinePoolStatus defines the observed state of MachinePool +type MachinePoolStatus struct { + // Replicas is the current number of replicas for the machine pool. + // +optional + Replicas int32 `json:"replicas,omitempty"` + + // MachineSets is the status of the machine sets for the machine pool on the remote cluster. + MachineSets []MachineSetStatus `json:"machineSets,omitempty"` + + // Conditions includes more detailed status for the cluster deployment + // +optional + Conditions []MachinePoolCondition `json:"conditions,omitempty"` +} + +// MachineSetStatus is the status of a machineset in the remote cluster. +type MachineSetStatus struct { + // Name is the name of the machine set. + Name string `json:"name"` + + // Replicas is the current number of replicas for the machine set. + Replicas int32 `json:"replicas"` + + // MinReplicas is the minimum number of replicas for the machine set. + MinReplicas int32 `json:"minReplicas"` + + // MaxReplicas is the maximum number of replicas for the machine set. + MaxReplicas int32 `json:"maxReplicas"` +} + +// MachinePoolCondition contains details for the current condition of a machine pool +type MachinePoolCondition struct { + // Type is the type of the condition. + Type MachinePoolConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// MachinePoolConditionType is a valid value for MachinePoolCondition.Type +type MachinePoolConditionType string + +const ( + // NotEnoughReplicasMachinePoolCondition is true when the minReplicas field + // is set too low for the number of machinesets for the machine pool. + NotEnoughReplicasMachinePoolCondition MachinePoolConditionType = "NotEnoughReplicas" + + // NoMachinePoolNameLeasesAvailable is true when the cloud provider requires a name lease for the in-cluster MachineSet, but no + // leases are available. + NoMachinePoolNameLeasesAvailable MachinePoolConditionType = "NoMachinePoolNameLeasesAvailable" + + // InvalidSubnetsMachinePoolCondition is true when there are missing or invalid entries in the subnet field + InvalidSubnetsMachinePoolCondition MachinePoolConditionType = "InvalidSubnets" + + // UnsupportedConfigurationMachinePoolCondition is true when the configuration of the MachinePool is unsupported + // by the cluster. 
+ UnsupportedConfigurationMachinePoolCondition MachinePoolConditionType = "UnsupportedConfiguration" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachinePool is the Schema for the machinepools API +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas +// +kubebuilder:printcolumn:name="PoolName",type="string",JSONPath=".spec.name" +// +kubebuilder:printcolumn:name="ClusterDeployment",type="string",JSONPath=".spec.clusterDeploymentRef.name" +// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas" +// +kubebuilder:resource:path=machinepools,scope=Namespaced +type MachinePool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MachinePoolSpec `json:"spec,omitempty"` + Status MachinePoolStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachinePoolList contains a list of MachinePool +type MachinePoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MachinePool `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MachinePool{}, &MachinePoolList{}) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/machinepoolnamelease_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/machinepoolnamelease_types.go new file mode 100644 index 00000000000..c744152b9a7 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/machinepoolnamelease_types.go @@ -0,0 +1,46 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MachinePoolNameLeaseSpec is a minimal resource for obtaining unique machine pool names of a limited length. +type MachinePoolNameLeaseSpec struct { +} + +// MachinePoolNameLeaseStatus defines the observed state of MachinePoolNameLease. +type MachinePoolNameLeaseStatus struct { +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachinePoolNameLease is the Schema for the MachinePoolNameLeases API. This resource is mostly empty +// as we're primarily relying on the name to determine if a lease is available. +// Note that not all cloud providers require the use of a lease for naming, at present this +// is only required for GCP where we're extremely restricted on name lengths. +// +k8s:openapi-gen=true +// +kubebuilder:printcolumn:name="MachinePool",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/machine-pool-name" +// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/cluster-deployment-name" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Namespaced +type MachinePoolNameLease struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MachinePoolNameLeaseSpec `json:"spec,omitempty"` + Status MachinePoolNameLeaseStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachinePoolNameLeaseList contains a list of MachinePoolNameLeases. 
+type MachinePoolNameLeaseList struct {
+    metav1.TypeMeta `json:",inline"`
+    metav1.ListMeta `json:"metadata,omitempty"`
+    Items []MachinePoolNameLease `json:"items"`
+}
+
+func init() {
+    SchemeBuilder.Register(&MachinePoolNameLease{}, &MachinePoolNameLeaseList{})
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/metaruntimeobject.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/metaruntimeobject.go
new file mode 100644
index 00000000000..5513a106f52
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/metaruntimeobject.go
@@ -0,0 +1,12 @@
+package v1
+
+import (
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// MetaRuntimeObject allows for the generic specification of hive objects since all hive objects implement both the meta and runtime object interfaces.
+type MetaRuntimeObject interface {
+    metav1.Object
+    runtime.Object
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/openstack/doc.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/openstack/doc.go
new file mode 100644
index 00000000000..80d5d4956ed
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/openstack/doc.go
@@ -0,0 +1,4 @@
+// Package openstack contains API Schema definitions for OpenStack clusters.
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/hive/pkg/apis/hive
+package openstack
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/openstack/machinepools.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/openstack/machinepools.go
new file mode 100644
index 00000000000..2853c7fface
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/openstack/machinepools.go
@@ -0,0 +1,46 @@
+package openstack
+
+// MachinePool stores the configuration for a machine pool installed
+// on OpenStack.
+type MachinePool struct {
+    // Flavor defines the OpenStack Nova flavor.
+    // eg. m1.large
+    // The json key here differs from the installer, which uses both "computeFlavor" and "type" depending on which
+    // struct you're looking at, and the resulting field on the MachineSet is "flavor". We are opting to stay consistent
+    // with the end result.
+    Flavor string `json:"flavor"`
+
+    // RootVolume defines the root volume for instances in the machine pool.
+    // The instances use ephemeral disks if not set.
+    // +optional
+    RootVolume *RootVolume `json:"rootVolume,omitempty"`
+}
+
+// Set sets the values from `required` to `o`.
+func (o *MachinePool) Set(required *MachinePool) {
+    if required == nil || o == nil {
+        return
+    }
+
+    if required.Flavor != "" {
+        o.Flavor = required.Flavor
+    }
+
+    if required.RootVolume != nil {
+        if o.RootVolume == nil {
+            o.RootVolume = new(RootVolume)
+        }
+        o.RootVolume.Size = required.RootVolume.Size
+        o.RootVolume.Type = required.RootVolume.Type
+    }
+}
+
+// RootVolume defines the storage for an instance.
+type RootVolume struct {
+    // Size defines the size of the volume in gibibytes (GiB).
+    // Required
+    Size int `json:"size"`
+    // Type defines the type of the volume.
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/openstack/platform.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/openstack/platform.go
new file mode 100644
index 00000000000..50aa95d4d3c
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/openstack/platform.go
@@ -0,0 +1,42 @@
+package openstack
+
+import (
+	corev1 "k8s.io/api/core/v1"
+)
+
+// Platform stores all the global OpenStack configuration.
+type Platform struct {
+	// CredentialsSecretRef refers to a secret that contains the OpenStack account access
+	// credentials.
+	CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
+
+	// CertificatesSecretRef refers to a secret that contains CA certificates
+	// necessary for communicating with OpenStack.
+	// Additional configuration is required for the OpenShift cluster to trust
+	// the certificates provided in this secret.
+	// The "clouds.yaml" file included in the credentialsSecretRef Secret must also include
+	// a reference to the certificate bundle file in order for the OpenShift cluster being created to
+	// trust the OpenStack endpoints.
+	// The "clouds.yaml" file must set the "cacert" field to
+	// either "/etc/openstack-ca/" or
+	// "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem".
+	//
+	// For example,
+	// """clouds.yaml
+	// clouds:
+	//   shiftstack:
+	//     auth: ...
+	//     cacert: "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem"
+	// """
+	//
+	// +optional
+	CertificatesSecretRef *corev1.LocalObjectReference `json:"certificatesSecretRef,omitempty"`
+
+	// Cloud indicates the OS_CLOUD value, i.e. which section of the clouds.yaml
+	// in the CredentialsSecretRef to use.
+	Cloud string `json:"cloud"`
+
+	// TrunkSupport indicates whether or not to use trunk ports in the OpenShift cluster.
+	// +optional
+	TrunkSupport bool `json:"trunkSupport,omitempty"`
+}
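
// Editor's example (illustrative, not part of the vendored code): an
// OpenStack Platform stanza built under the assumptions documented above;
// the secret names and the "shiftstack" cloud section are hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/openshift/hive/pkg/apis/hive/v1/openstack"
)

func main() {
	platform := openstack.Platform{
		// clouds.yaml lives in this secret; Cloud selects its "shiftstack" section.
		CredentialsSecretRef: corev1.LocalObjectReference{Name: "mycluster-openstack-creds"},
		// Optional CA bundle; clouds.yaml's "cacert" must point at it as described above.
		CertificatesSecretRef: &corev1.LocalObjectReference{Name: "mycluster-openstack-ca"},
		Cloud:                 "shiftstack",
	}
	fmt.Printf("%+v\n", platform)
}
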
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/openstack/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/openstack/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..f7314a9e1b3
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/openstack/zz_generated.deepcopy.go
@@ -0,0 +1,68 @@
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package openstack
+
+import (
+	v1 "k8s.io/api/core/v1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachinePool) DeepCopyInto(out *MachinePool) {
+	*out = *in
+	if in.RootVolume != nil {
+		in, out := &in.RootVolume, &out.RootVolume
+		*out = new(RootVolume)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool.
+func (in *MachinePool) DeepCopy() *MachinePool {
+	if in == nil {
+		return nil
+	}
+	out := new(MachinePool)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Platform) DeepCopyInto(out *Platform) {
+	*out = *in
+	out.CredentialsSecretRef = in.CredentialsSecretRef
+	if in.CertificatesSecretRef != nil {
+		in, out := &in.CertificatesSecretRef, &out.CertificatesSecretRef
+		*out = new(v1.LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
+func (in *Platform) DeepCopy() *Platform {
+	if in == nil {
+		return nil
+	}
+	out := new(Platform)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RootVolume) DeepCopyInto(out *RootVolume) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootVolume.
+func (in *RootVolume) DeepCopy() *RootVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(RootVolume)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/ovirt/doc.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/ovirt/doc.go
new file mode 100644
index 00000000000..9fbf9dd5b59
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/ovirt/doc.go
@@ -0,0 +1,4 @@
+// Package ovirt contains ovirt-specific structures for
+// installer configuration and management.
+// +k8s:deepcopy-gen=package,register
+package ovirt
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/ovirt/machinepool.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/ovirt/machinepool.go
new file mode 100644
index 00000000000..5670b4647a9
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/ovirt/machinepool.go
@@ -0,0 +1,61 @@
+package ovirt
+
+// MachinePool stores the configuration for a machine pool installed
+// on oVirt.
+type MachinePool struct {
+	// CPU defines the VM CPU.
+	// +optional
+	CPU *CPU `json:"cpu,omitempty"`
+
+	// MemoryMB is the size of a VM's memory in MiBs.
+	// +optional
+	MemoryMB int32 `json:"memoryMB,omitempty"`
+
+	// OSDisk is the root disk of the node.
+	// +optional
+	OSDisk *Disk `json:"osDisk,omitempty"`
+
+	// VMType defines the workload type of the VM.
+	// +kubebuilder:validation:Enum="";desktop;server;high_performance
+	// +optional
+	VMType VMType `json:"vmType,omitempty"`
+}
+
+// CPU defines the VM CPU, made of (Sockets * Cores).
+type CPU struct {
+	// Sockets is the number of sockets for a VM.
+	// Total CPUs is (Sockets * Cores)
+	Sockets int32 `json:"sockets"`
+
+	// Cores is the number of cores per socket.
+	// Total CPUs is (Sockets * Cores)
+	Cores int32 `json:"cores"`
+}
+
+// Disk defines a VM disk.
+type Disk struct {
+	// SizeGB is the size of the bootable disk in GiB.
+	SizeGB int64 `json:"sizeGB"`
+}
+
+// VMType defines the type of the VM, which will change the VM configuration,
+// like including or excluding devices (like excluding the sound card),
+// device configuration (like using multi-queues for vNics), and several other
+// configuration tweaks. This doesn't affect properties like CPU count and amount of memory.
+type VMType string
+
+const (
+	// VMTypeDesktop sets the VM type to desktop. Virtual machines optimized to act
+	// as desktop machines do have a sound card, use an image (thin allocation),
+	// and are stateless.
+	VMTypeDesktop VMType = "desktop"
+	// VMTypeServer sets the VM type to server. Virtual machines optimized to act
+	// as servers have no sound card, use a cloned disk image, and are not stateless.
+	VMTypeServer VMType = "server"
+	// VMTypeHighPerformance sets the VM type to high_performance, which sets various
+	// properties of a VM to optimize for performance, like enabling headless mode,
+	// disabling usb, smart-card, and sound devices, enabling host cpu pass-through,
+	// multi-queues for vNics, and several more items.
+	// See https://www.ovirt.org/develop/release-management/features/virt/high-performance-vm.html.
+	VMTypeHighPerformance VMType = "high_performance"
+)
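
// Editor's example (illustrative, not part of the vendored code): an oVirt
// machine pool using the types above. Note that the effective vCPU count is
// Sockets * Cores; the concrete sizes below are hypothetical.

package main

import (
	"fmt"

	"github.com/openshift/hive/pkg/apis/hive/v1/ovirt"
)

func main() {
	pool := ovirt.MachinePool{
		CPU:      &ovirt.CPU{Sockets: 2, Cores: 4}, // 2 * 4 = 8 vCPUs total
		MemoryMB: 16384,
		OSDisk:   &ovirt.Disk{SizeGB: 120},
		VMType:   ovirt.VMTypeServer,
	}
	fmt.Printf("vCPUs: %d\n", pool.CPU.Sockets*pool.CPU.Cores)
}
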
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/ovirt/platform.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/ovirt/platform.go
new file mode 100644
index 00000000000..fc3b8b27668
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/ovirt/platform.go
@@ -0,0 +1,22 @@
+package ovirt
+
+import (
+	corev1 "k8s.io/api/core/v1"
+)
+
+// Platform stores all the global oVirt configuration.
+type Platform struct {
+	// ClusterID is the target cluster under which all VMs will run.
+	ClusterID string `json:"ovirt_cluster_id"`
+	// CredentialsSecretRef refers to a secret that contains the oVirt account access
+	// credentials with fields: ovirt_url, ovirt_username, ovirt_password, ovirt_ca_bundle.
+	CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
+	// CertificatesSecretRef refers to a secret that contains the oVirt CA certificates
+	// necessary for communicating with oVirt.
+	CertificatesSecretRef corev1.LocalObjectReference `json:"certificatesSecretRef"`
+	// StorageDomainID is the target storage domain under which all VM disks will be created.
+	StorageDomainID string `json:"storage_domain_id"`
+	// NetworkName is the target network for all the network interfaces of the nodes.
+	// If omitted, it defaults to the ovirtmgmt network, which is the default network for every oVirt cluster.
+	NetworkName string `json:"ovirt_network_name,omitempty"`
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/ovirt/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/ovirt/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..643fc05b73c
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/ovirt/zz_generated.deepcopy.go
@@ -0,0 +1,81 @@
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package ovirt
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CPU) DeepCopyInto(out *CPU) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPU.
+func (in *CPU) DeepCopy() *CPU {
+	if in == nil {
+		return nil
+	}
+	out := new(CPU)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Disk) DeepCopyInto(out *Disk) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Disk.
+func (in *Disk) DeepCopy() *Disk {
+	if in == nil {
+		return nil
+	}
+	out := new(Disk)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachinePool) DeepCopyInto(out *MachinePool) {
+	*out = *in
+	if in.CPU != nil {
+		in, out := &in.CPU, &out.CPU
+		*out = new(CPU)
+		**out = **in
+	}
+	if in.OSDisk != nil {
+		in, out := &in.OSDisk, &out.OSDisk
+		*out = new(Disk)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool.
+func (in *MachinePool) DeepCopy() *MachinePool {
+	if in == nil {
+		return nil
+	}
+	out := new(MachinePool)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Platform) DeepCopyInto(out *Platform) {
+	*out = *in
+	out.CredentialsSecretRef = in.CredentialsSecretRef
+	out.CertificatesSecretRef = in.CertificatesSecretRef
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
+func (in *Platform) DeepCopy() *Platform {
+	if in == nil {
+		return nil
+	}
+	out := new(Platform)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/register.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/register.go
new file mode 100644
index 00000000000..3160b54d2a5
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/register.go
@@ -0,0 +1,36 @@
+// NOTE: Boilerplate only. Ignore this file.
+
+// Package v1 contains API Schema definitions for the hive v1 API group
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/hive/pkg/apis/hive
+// +k8s:defaulter-gen=TypeMeta
+// +groupName=hive.openshift.io
+package v1

+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/scheme"
+)
+
+var (
+	// HiveAPIGroup is the group that all hive objects belong to in the API server.
+	HiveAPIGroup = "hive.openshift.io"
+
+	// HiveAPIVersion is the api version that all hive objects are currently at.
+	HiveAPIVersion = "v1"
+
+	// SchemeGroupVersion is the group version used to register these objects.
+	SchemeGroupVersion = schema.GroupVersion{Group: HiveAPIGroup, Version: HiveAPIVersion}
+
+	// SchemeBuilder is used to add Go types to the GroupVersionKind scheme.
+	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+
+	// AddToScheme is a shortcut for SchemeBuilder.AddToScheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Resource takes an unqualified resource and returns a Group-qualified GroupResource.
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
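
// Editor's example (illustrative, not part of the vendored code): registering
// the hive v1 types into a runtime.Scheme via the AddToScheme shortcut above,
// as a consumer would do before building a typed client.

package main

import (
	"k8s.io/apimachinery/pkg/runtime"

	hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := hivev1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme can now encode/decode hive.openshift.io/v1 kinds, e.g. when
	// constructing a controller-runtime client.
}
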
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/syncidentityprovider_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/syncidentityprovider_types.go
new file mode 100644
index 00000000000..af1b03f130f
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/syncidentityprovider_types.go
@@ -0,0 +1,98 @@
+package v1
+
+import (
+	openshiftapiv1 "github.com/openshift/api/config/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// SyncIdentityProviderCommonSpec defines the identity providers to sync.
+type SyncIdentityProviderCommonSpec struct {
+	// IdentityProviders is an ordered list of ways for a user to identify themselves.
+	// +required
+	IdentityProviders []openshiftapiv1.IdentityProvider `json:"identityProviders"`
+}
+
+// SelectorSyncIdentityProviderSpec defines the identity providers to sync along with a
+// ClusterDeploymentSelector indicating which clusters the SelectorSyncIdentityProvider applies
+// to in any namespace.
+type SelectorSyncIdentityProviderSpec struct {
+	SyncIdentityProviderCommonSpec `json:",inline"`
+
+	// ClusterDeploymentSelector is a LabelSelector indicating which clusters the SelectorSyncIdentityProvider
+	// applies to in any namespace.
+	// +optional
+	ClusterDeploymentSelector metav1.LabelSelector `json:"clusterDeploymentSelector,omitempty"`
+}
+
+// SyncIdentityProviderSpec defines the identity providers to sync along with
+// ClusterDeploymentRefs indicating which clusters the SyncIdentityProvider applies to in the
+// SyncIdentityProvider's namespace.
+type SyncIdentityProviderSpec struct {
+	SyncIdentityProviderCommonSpec `json:",inline"`
+
+	// ClusterDeploymentRefs is the list of LocalObjectReferences indicating which clusters the
+	// SyncIdentityProvider applies to in the SyncIdentityProvider's namespace.
+	// +required
+	ClusterDeploymentRefs []corev1.LocalObjectReference `json:"clusterDeploymentRefs"`
+}
+
+// IdentityProviderStatus defines the observed state of the synced identity providers.
+type IdentityProviderStatus struct {
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SelectorSyncIdentityProvider is the Schema for the SelectorSyncIdentityProvider API
+// +k8s:openapi-gen=true
+// +kubebuilder:resource:scope=Cluster
+type SelectorSyncIdentityProvider struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   SelectorSyncIdentityProviderSpec `json:"spec,omitempty"`
+	Status IdentityProviderStatus           `json:"status,omitempty"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SyncIdentityProvider is the Schema for the SyncIdentityProvider API
+// +k8s:openapi-gen=true
+// +kubebuilder:resource:scope=Namespaced
+type SyncIdentityProvider struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   SyncIdentityProviderSpec `json:"spec,omitempty"`
+	Status IdentityProviderStatus   `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SelectorSyncIdentityProviderList contains a list of SelectorSyncIdentityProviders.
+type SelectorSyncIdentityProviderList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []SelectorSyncIdentityProvider `json:"items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SyncIdentityProviderList contains a list of SyncIdentityProviders.
+type SyncIdentityProviderList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []SyncIdentityProvider `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(
+		&SyncIdentityProvider{},
+		&SyncIdentityProviderList{},
+		&SelectorSyncIdentityProvider{},
+		&SelectorSyncIdentityProviderList{},
+	)
+}
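
// Editor's example (illustrative, not part of the vendored code): a
// SelectorSyncIdentityProvider that targets all ClusterDeployments labeled
// env=prod. It assumes github.com/openshift/api/config/v1's IdentityProvider
// type; the provider name is hypothetical and its provider-specific
// configuration (htpasswd, OIDC, ...) is elided here.

package main

import (
	openshiftapiv1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
)

func main() {
	_ = hivev1.SelectorSyncIdentityProvider{
		ObjectMeta: metav1.ObjectMeta{Name: "idp-for-prod"},
		Spec: hivev1.SelectorSyncIdentityProviderSpec{
			SyncIdentityProviderCommonSpec: hivev1.SyncIdentityProviderCommonSpec{
				// Provider-specific configuration elided for brevity.
				IdentityProviders: []openshiftapiv1.IdentityProvider{{Name: "example-idp"}},
			},
			ClusterDeploymentSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{"env": "prod"},
			},
		},
	}
}
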
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/syncset_types.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/syncset_types.go
new file mode 100644
index 00000000000..cabc7fbb679
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/syncset_types.go
@@ -0,0 +1,326 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// SyncSetResourceApplyMode is a string representing the mode with which to
+// apply SyncSet Resources.
+type SyncSetResourceApplyMode string
+
+const (
+	// UpsertResourceApplyMode indicates that objects will be updated
+	// or inserted (created).
+	UpsertResourceApplyMode SyncSetResourceApplyMode = "Upsert"
+
+	// SyncResourceApplyMode inherits the create-or-update functionality
+	// of Upsert but also indicates that objects will be deleted if they were
+	// previously created and are later detected missing from the Resources defined in the SyncSet.
+	SyncResourceApplyMode SyncSetResourceApplyMode = "Sync"
+)
+
+// SyncSetApplyBehavior is a string representing the behavior to use when
+// applying a SyncSet to a target cluster.
+// +kubebuilder:validation:Enum="";Apply;CreateOnly;CreateOrUpdate
+type SyncSetApplyBehavior string
+
+const (
+	// ApplySyncSetApplyBehavior is the default apply behavior. It will result
+	// in resources getting applied using the 'oc apply' command to the target
+	// cluster.
+	ApplySyncSetApplyBehavior SyncSetApplyBehavior = "Apply"
+
+	// CreateOnlySyncSetApplyBehavior results in resources only getting created
+	// if they do not exist, otherwise they are left alone.
+	CreateOnlySyncSetApplyBehavior SyncSetApplyBehavior = "CreateOnly"
+
+	// CreateOrUpdateSyncSetApplyBehavior results in resources getting created if
+	// they do not exist, otherwise they are updated with the contents of the
+	// syncset resource. This is different from Apply behavior in that an annotation
+	// is not added to the target resource with the "lastApplied" value. It allows
+	// for syncing larger resources, but loses the ability to sync map entry deletes.
+	CreateOrUpdateSyncSetApplyBehavior SyncSetApplyBehavior = "CreateOrUpdate"
+)
+
+// SyncSetPatchApplyMode is a string representing the mode with which to apply
+// SyncSet Patches.
+type SyncSetPatchApplyMode string
+
+const (
+	// ApplyOncePatchApplyMode indicates that the patch should be applied
+	// only once.
+	ApplyOncePatchApplyMode SyncSetPatchApplyMode = "ApplyOnce"
+
+	// AlwaysApplyPatchApplyMode indicates that the patch should be
+	// continuously applied.
+	AlwaysApplyPatchApplyMode SyncSetPatchApplyMode = "AlwaysApply"
+)
+
+// SyncObjectPatch represents a patch to be applied to a specific object.
+type SyncObjectPatch struct {
+	// APIVersion is the Group and Version of the object to be patched.
+	APIVersion string `json:"apiVersion"`
+
+	// Kind is the Kind of the object to be patched.
+	Kind string `json:"kind"`
+
+	// Name is the name of the object to be patched.
+	Name string `json:"name"`
+
+	// Namespace is the Namespace in which the object to patch exists.
+	// Defaults to the SyncSet's Namespace.
+	// +optional
+	Namespace string `json:"namespace,omitempty"`
+
+	// Patch is the patch to apply.
+	Patch string `json:"patch"`
+
+	// PatchType indicates the PatchType as "strategic" (default), "json", or "merge".
+	// +optional
+	PatchType string `json:"patchType,omitempty"`
+}
+
+// SecretReference is a reference to a secret by name and namespace.
+type SecretReference struct {
+	// Name is the name of the secret.
+	Name string `json:"name"`
+	// Namespace is the namespace where the secret lives. If not present for the source
+	// secret reference, it is assumed to be the same namespace as the syncset with the
+	// reference.
+ // +optional + Namespace string `json:"namespace,omitempty"` +} + +// SecretMapping defines a source and destination for a secret to be synced by a SyncSet +type SecretMapping struct { + + // SourceRef specifies the name and namespace of a secret on the management cluster + SourceRef SecretReference `json:"sourceRef"` + + // TargetRef specifies the target name and namespace of the secret on the target cluster + TargetRef SecretReference `json:"targetRef"` +} + +// SyncConditionType is a valid value for SyncCondition.Type +type SyncConditionType string + +const ( + // ApplySuccessSyncCondition indicates whether the resource or patch has been applied. + ApplySuccessSyncCondition SyncConditionType = "ApplySuccess" + + // ApplyFailureSyncCondition indicates that a resource or patch has failed to apply. + // It should include a reason and message for the failure. + ApplyFailureSyncCondition SyncConditionType = "ApplyFailure" + + // DeletionFailedSyncCondition indicates that resource deletion has failed. + // It should include a reason and message for the failure. + DeletionFailedSyncCondition SyncConditionType = "DeletionFailed" + + // UnknownObjectSyncCondition indicates that the resource type cannot be determined. + // It should include a reason and message for the failure. + UnknownObjectSyncCondition SyncConditionType = "UnknownObject" +) + +// SyncCondition is a condition in a SyncStatus +type SyncCondition struct { + // Type is the type of the condition. + Type SyncConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// SyncSetObjectStatus describes the status of resources created or patches that have +// been applied from a SyncSet or SelectorSyncSet. +type SyncSetObjectStatus struct { + // Name is the name of the SyncSet. + Name string `json:"name"` + + // Resources is the list of SyncStatus for objects that have been synced. + // +optional + Resources []SyncStatus `json:"resources,omitempty"` + + // ResourceApplyMode indicates if the Resource apply mode is "Upsert" (default) or "Sync". + // ApplyMode "Upsert" indicates create and update. + // ApplyMode "Sync" indicates create, update and delete. + // +optional + ResourceApplyMode SyncSetResourceApplyMode `json:"resourceApplyMode,omitempty"` + + // Patches is the list of SyncStatus for patches that have been applied. + // +optional + Patches []SyncStatus `json:"patches,omitempty"` + + // Secrets is the list of SyncStatus for secrets that have been synced. + // +optional + Secrets []SyncStatus `json:"secrets,omitempty"` + + // Conditions is the list of SyncConditions used to indicate UnknownObject + // when a resource type cannot be determined from a SyncSet resource. 
+ // +optional + Conditions []SyncCondition `json:"conditions,omitempty"` +} + +// SyncStatus describes objects that have been created or patches that +// have been applied using the unique md5 sum of the object or patch. +type SyncStatus struct { + // APIVersion is the Group and Version of the object that was synced or + // patched. + APIVersion string `json:"apiVersion"` + + // Kind is the Kind of the object that was synced or patched. + Kind string `json:"kind"` + + // Resource is the resource name for the object that was synced. + // This will be populated for resources, but not patches + // +optional + Resource string `json:"resource,omitempty"` + + // Name is the name of the object that was synced or patched. + Name string `json:"name"` + + // Namespace is the Namespace of the object that was synced or patched. + Namespace string `json:"namespace"` + + // Hash is the unique md5 hash of the resource or patch. + Hash string `json:"hash"` + + // Conditions is the list of conditions indicating success or failure of object + // create, update and delete as well as patch application. + Conditions []SyncCondition `json:"conditions"` +} + +// SyncSetCommonSpec defines the resources and patches to sync +type SyncSetCommonSpec struct { + // Resources is the list of objects to sync from RawExtension definitions. + // +optional + Resources []runtime.RawExtension `json:"resources,omitempty"` + + // ResourceApplyMode indicates if the Resource apply mode is "Upsert" (default) or "Sync". + // ApplyMode "Upsert" indicates create and update. + // ApplyMode "Sync" indicates create, update and delete. + // +optional + ResourceApplyMode SyncSetResourceApplyMode `json:"resourceApplyMode,omitempty"` + + // Patches is the list of patches to apply. + // +optional + Patches []SyncObjectPatch `json:"patches,omitempty"` + + // Secrets is the list of secrets to sync along with their respective destinations. + // +optional + Secrets []SecretMapping `json:"secretMappings,omitempty"` + + // ApplyBehavior indicates how resources in this syncset will be applied to the target + // cluster. The default value of "Apply" indicates that resources should be applied + // using the 'oc apply' command. If no value is set, "Apply" is assumed. + // A value of "CreateOnly" indicates that the resource will only be created if it does + // not already exist in the target cluster. Otherwise, it will be left alone. + // A value of "CreateOrUpdate" indicates that the resource will be created/updated without + // the use of the 'oc apply' command, allowing larger resources to be synced, but losing + // some functionality of the 'oc apply' command such as the ability to remove annotations, + // labels, and other map entries in general. + // +optional + ApplyBehavior SyncSetApplyBehavior `json:"applyBehavior,omitempty"` +} + +// SelectorSyncSetSpec defines the SyncSetCommonSpec resources and patches to sync along +// with a ClusterDeploymentSelector indicating which clusters the SelectorSyncSet applies +// to in any namespace. +type SelectorSyncSetSpec struct { + SyncSetCommonSpec `json:",inline"` + + // ClusterDeploymentSelector is a LabelSelector indicating which clusters the SelectorSyncSet + // applies to in any namespace. + // +optional + ClusterDeploymentSelector metav1.LabelSelector `json:"clusterDeploymentSelector,omitempty"` +} + +// SyncSetSpec defines the SyncSetCommonSpec resources and patches to sync along with +// ClusterDeploymentRefs indicating which clusters the SyncSet applies to in the +// SyncSet's namespace. 
+type SyncSetSpec struct {
+	SyncSetCommonSpec `json:",inline"`
+
+	// ClusterDeploymentRefs is the list of LocalObjectReferences indicating which clusters the
+	// SyncSet applies to in the SyncSet's namespace.
+	// +required
+	ClusterDeploymentRefs []corev1.LocalObjectReference `json:"clusterDeploymentRefs"`
+}
+
+// SyncSetStatus defines the observed state of a SyncSet.
+type SyncSetStatus struct {
+}
+
+// SelectorSyncSetStatus defines the observed state of a SelectorSyncSet.
+type SelectorSyncSetStatus struct {
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SelectorSyncSet is the Schema for the SelectorSyncSet API
+// +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=selectorsyncsets,shortName=sss,scope=Cluster
+type SelectorSyncSet struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   SelectorSyncSetSpec   `json:"spec,omitempty"`
+	Status SelectorSyncSetStatus `json:"status,omitempty"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SyncSet is the Schema for the SyncSet API
+// +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=syncsets,shortName=ss,scope=Namespaced
+type SyncSet struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   SyncSetSpec   `json:"spec,omitempty"`
+	Status SyncSetStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SelectorSyncSetList contains a list of SelectorSyncSets.
+type SelectorSyncSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []SelectorSyncSet `json:"items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SyncSetList contains a list of SyncSets.
+type SyncSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []SyncSet `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(
+		&SyncSet{},
+		&SyncSetList{},
+		&SelectorSyncSet{},
+		&SelectorSyncSetList{},
+	)
+}
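
// Editor's example (illustrative, not part of the vendored code): a SyncSet
// that syncs a raw ConfigMap to one cluster using the "Sync" apply mode, so
// resources later removed from the list are also deleted on the target
// cluster. The names are hypothetical.

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
)

func main() {
	_ = hivev1.SyncSet{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster-config", Namespace: "hive"},
		Spec: hivev1.SyncSetSpec{
			SyncSetCommonSpec: hivev1.SyncSetCommonSpec{
				// "Sync" = create, update, and delete; "Upsert" would never delete.
				ResourceApplyMode: hivev1.SyncResourceApplyMode,
				Resources: []runtime.RawExtension{{
					Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"example","namespace":"openshift-config"}}`),
				}},
			},
			// Target the ClusterDeployment named "mycluster" in this namespace.
			ClusterDeploymentRefs: []corev1.LocalObjectReference{{Name: "mycluster"}},
		},
	}
}
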
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/clusterdeployment_validating_admission_hook.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/clusterdeployment_validating_admission_hook.go
new file mode 100644
index 00000000000..39eb5c07773
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/clusterdeployment_validating_admission_hook.go
@@ -0,0 +1,827 @@
+package validatingwebhooks
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+
+	log "github.com/sirupsen/logrus"
+
+	admissionv1beta1 "k8s.io/api/admission/v1beta1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	apivalidation "k8s.io/apimachinery/pkg/api/validation"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/client-go/rest"
+
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
+	"github.com/openshift/hive/pkg/constants"
+	"github.com/openshift/hive/pkg/manageddns"
+)
+
+const (
+	clusterDeploymentGroup    = "hive.openshift.io"
+	clusterDeploymentVersion  = "v1"
+	clusterDeploymentResource = "clusterdeployments"
+
+	clusterDeploymentAdmissionGroup   = "admission.hive.openshift.io"
+	clusterDeploymentAdmissionVersion = "v1"
+)
+
+var (
+	mutableFields = []string{"CertificateBundles", "ClusterMetadata", "ControlPlaneConfig", "Ingress", "Installed", "PreserveOnDelete", "ClusterPoolRef", "PowerState", "HibernateAfter", "InstallAttemptsLimit"}
+)
+
+// ClusterDeploymentValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server.
+type ClusterDeploymentValidatingAdmissionHook struct {
+	decoder             *admission.Decoder
+	validManagedDomains []string
+	fs                  *featureSet
+}
+
+// NewClusterDeploymentValidatingAdmissionHook constructs a new ClusterDeploymentValidatingAdmissionHook.
+func NewClusterDeploymentValidatingAdmissionHook(decoder *admission.Decoder) *ClusterDeploymentValidatingAdmissionHook {
+	logger := log.WithField("validatingWebhook", "clusterdeployment")
+	managedDomains, err := manageddns.ReadManagedDomainsFile()
+	if err != nil {
+		logger.WithError(err).Fatal("Unable to read managedDomains file")
+	}
+	domains := []string{}
+	for _, md := range managedDomains {
+		domains = append(domains, md.Domains...)
+	}
+	logger.WithField("managedDomains", domains).Info("Read managed domains")
+	return &ClusterDeploymentValidatingAdmissionHook{
+		decoder:             decoder,
+		validManagedDomains: domains,
+		fs:                  newFeatureSet(),
+	}
+}
+
+// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the
+// webhook is accessed by the kube apiserver.
+// For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1/clusterdeploymentvalidators".
+// When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below.
+func (a *ClusterDeploymentValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) {
+	log.WithFields(log.Fields{
+		"group":    clusterDeploymentAdmissionGroup,
+		"version":  clusterDeploymentAdmissionVersion,
+		"resource": "clusterdeploymentvalidator",
+	}).Info("Registering validation REST resource")
+
+	// NOTE: This GVR is meant to be different than the ClusterDeployment CRD GVR which has group "hive.openshift.io".
+	return schema.GroupVersionResource{
+			Group:    clusterDeploymentAdmissionGroup,
+			Version:  clusterDeploymentAdmissionVersion,
+			Resource: "clusterdeploymentvalidators",
+		},
+		"clusterdeploymentvalidator"
+}
+
+// Initialize is called by generic-admission-server on startup to set up any special initialization that your webhook needs.
+func (a *ClusterDeploymentValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error {
+	log.WithFields(log.Fields{
+		"group":    clusterDeploymentAdmissionGroup,
+		"version":  clusterDeploymentAdmissionVersion,
+		"resource": "clusterdeploymentvalidator",
+	}).Info("Initializing validation REST resource")
+	return nil // No initialization needed right now.
+}
+
+// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request.
+// Usually it's the kube apiserver that is making the admission validation request.
+func (a *ClusterDeploymentValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
+	contextLogger := log.WithFields(log.Fields{
+		"operation": admissionSpec.Operation,
+		"group":     admissionSpec.Resource.Group,
+		"version":   admissionSpec.Resource.Version,
+		"resource":  admissionSpec.Resource.Resource,
+		"method":    "Validate",
+	})
+
+	if !a.shouldValidate(admissionSpec) {
+		contextLogger.Info("Skipping validation for request")
+		// The request object isn't something that this validator should validate.
+		// Therefore, we say that it's Allowed.
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: true,
+		}
+	}
+
+	contextLogger.Info("Validating request")
+
+	switch admissionSpec.Operation {
+	case admissionv1beta1.Create:
+		return a.validateCreate(admissionSpec)
+	case admissionv1beta1.Update:
+		return a.validateUpdate(admissionSpec)
+	case admissionv1beta1.Delete:
+		return a.validateDelete(admissionSpec)
+	default:
+		contextLogger.Info("Successful validation")
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: true,
+		}
+	}
+}
+
+// shouldValidate explicitly checks if the request should be validated. For example, this webhook may have accidentally been registered to check
+// the validity of some other type of object with a different GVR.
+func (a *ClusterDeploymentValidatingAdmissionHook) shouldValidate(admissionSpec *admissionv1beta1.AdmissionRequest) bool {
+	contextLogger := log.WithFields(log.Fields{
+		"operation": admissionSpec.Operation,
+		"group":     admissionSpec.Resource.Group,
+		"version":   admissionSpec.Resource.Version,
+		"resource":  admissionSpec.Resource.Resource,
+		"method":    "shouldValidate",
+	})
+
+	if admissionSpec.Resource.Group != clusterDeploymentGroup {
+		contextLogger.Debug("Returning False, not our group")
+		return false
+	}
+
+	if admissionSpec.Resource.Version != clusterDeploymentVersion {
+		contextLogger.Debug("Returning False, it's our group, but not the right version")
+		return false
+	}
+
+	if admissionSpec.Resource.Resource != clusterDeploymentResource {
+		contextLogger.Debug("Returning False, it's our group and version, but not the right resource")
+		return false
+	}
+
+	// If we get here, then we're supposed to validate the object.
+	contextLogger.Debug("Returning True, passed all prerequisites.")
+	return true
+}
+
+// validateCreate specifically validates create operations for ClusterDeployment objects.
+func (a *ClusterDeploymentValidatingAdmissionHook) validateCreate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateCreate", + }) + + if admResp := validatefeatureGates(a.decoder, admissionSpec, a.fs, contextLogger); admResp != nil { + contextLogger.Errorf("object was rejected due to feature gate failures") + return admResp + } + + newObject := &hivev1.ClusterDeployment{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = newObject.Name + + // TODO: Put Create Validation Here (or in openAPIV3Schema validation section of crd) + + if len(newObject.Name) > validation.DNS1123LabelMaxLength { + message := fmt.Sprintf("Invalid cluster deployment name (.meta.name): %s", validation.MaxLenError(validation.DNS1123LabelMaxLength)) + contextLogger.Error(message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + if len(newObject.Spec.ClusterName) > validation.DNS1123LabelMaxLength { + message := fmt.Sprintf("Invalid cluster name (.spec.clusterName): %s", validation.MaxLenError(validation.DNS1123LabelMaxLength)) + contextLogger.Error(message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + // validate the ingress + if ingressValidationResult := validateIngress(newObject, contextLogger); ingressValidationResult != nil { + return ingressValidationResult + } + + // validate the certificate bundles + if r := validateCertificateBundles(newObject, contextLogger); r != nil { + return r + } + + if newObject.Spec.ManageDNS { + if !validateDomain(newObject.Spec.BaseDomain, a.validManagedDomains) { + message := "The base domain must be a child of one of the managed domains for ClusterDeployments with manageDNS set to true" + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + } + + allErrs := field.ErrorList{} + specPath := field.NewPath("spec") + + if !newObject.Spec.Installed { + if newObject.Spec.Provisioning == nil { + allErrs = append(allErrs, field.Required(specPath.Child("provisioning"), "provisioning is required if not installed")) + + } else if newObject.Spec.Provisioning.InstallConfigSecretRef == nil || newObject.Spec.Provisioning.InstallConfigSecretRef.Name == "" { + // InstallConfigSecretRef is not required for agent install strategy + if newObject.Spec.Provisioning.InstallStrategy == nil || newObject.Spec.Provisioning.InstallStrategy.Agent == nil { + allErrs = append(allErrs, 
field.Required(specPath.Child("provisioning", "installConfigSecretRef", "name"), "must specify an InstallConfig")) + } + } + } + + allErrs = append(allErrs, validateClusterPlatform(specPath.Child("platform"), newObject.Spec.Platform)...) + allErrs = append(allErrs, validateCanManageDNSForClusterPlatform(specPath, newObject.Spec)...) + + if newObject.Spec.Provisioning != nil { + if newObject.Spec.Provisioning.SSHPrivateKeySecretRef != nil && newObject.Spec.Provisioning.SSHPrivateKeySecretRef.Name == "" { + allErrs = append(allErrs, field.Required(specPath.Child("provisioning", "sshPrivateKeySecretRef", "name"), "must specify a name for the ssh private key secret if the ssh private key secret is specified")) + } + } + + if poolRef := newObject.Spec.ClusterPoolRef; poolRef != nil { + if claimName := poolRef.ClaimName; claimName != "" { + allErrs = append(allErrs, field.Invalid(specPath.Child("clusterPoolRef", "claimName"), claimName, "cannot create a ClusterDeployment that is already claimed")) + } + } + + if len(allErrs) > 0 { + status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + // If we get here, then all checks passed, so the object is valid. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +func validatefeatureGates(decoder *admission.Decoder, admissionSpec *admissionv1beta1.AdmissionRequest, fs *featureSet, contextLogger *log.Entry) *admissionv1beta1.AdmissionResponse { + obj := &unstructured.Unstructured{} + if err := decoder.DecodeRaw(admissionSpec.Object, obj); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + contextLogger.WithField("enabledFeatureGates", fs.Enabled).Info("feature gates enabled") + + errs := field.ErrorList{} + // To add validation for feature gates use these examples + // errs = append(errs, equalOnlyWhenFeatureGate(fs, obj, "spec.platform.type", "AlphaPlatformAEnabled", "platformA")...) + errs = append(errs, existsOnlyWhenFeatureGate(fs, obj, "spec.provisioning.installStrategy.agent", hivev1.FeatureGateAgentInstallStrategy)...) 
+ + if len(errs) > 0 && len(errs.ToAggregate().Errors()) > 0 { + status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, errs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + return nil +} + +func validateClusterPlatform(path *field.Path, platform hivev1.Platform) field.ErrorList { + allErrs := field.ErrorList{} + numberOfPlatforms := 0 + if aws := platform.AWS; aws != nil { + numberOfPlatforms++ + awsPath := path.Child("aws") + if aws.CredentialsSecretRef.Name == "" { + allErrs = append(allErrs, field.Required(awsPath.Child("credentialsSecretRef", "name"), "must specify secrets for AWS access")) + } + if aws.Region == "" { + allErrs = append(allErrs, field.Required(awsPath.Child("region"), "must specify AWS region")) + } + } + if azure := platform.Azure; azure != nil { + numberOfPlatforms++ + azurePath := path.Child("azure") + if azure.CredentialsSecretRef.Name == "" { + allErrs = append(allErrs, field.Required(azurePath.Child("credentialsSecretRef", "name"), "must specify secrets for Azure access")) + } + if azure.Region == "" { + allErrs = append(allErrs, field.Required(azurePath.Child("region"), "must specify Azure region")) + } + if azure.BaseDomainResourceGroupName == "" { + allErrs = append(allErrs, field.Required(azurePath.Child("baseDomainResourceGroupName"), "must specify the Azure resource group for the base domain")) + } + } + if gcp := platform.GCP; gcp != nil { + numberOfPlatforms++ + gcpPath := path.Child("gcp") + if gcp.CredentialsSecretRef.Name == "" { + allErrs = append(allErrs, field.Required(gcpPath.Child("credentialsSecretRef", "name"), "must specify secrets for GCP access")) + } + if gcp.Region == "" { + allErrs = append(allErrs, field.Required(gcpPath.Child("region"), "must specify GCP region")) + } + } + if openstack := platform.OpenStack; openstack != nil { + numberOfPlatforms++ + openstackPath := path.Child("openStack") + if openstack.CredentialsSecretRef.Name == "" { + allErrs = append(allErrs, field.Required(openstackPath.Child("credentialsSecretRef", "name"), "must specify secrets for OpenStack access")) + } + if openstack.CertificatesSecretRef != nil && openstack.CertificatesSecretRef.Name == "" { + allErrs = append(allErrs, field.Required(openstackPath.Child("certificatesSecretRef", "name"), "must specify name of the secret for OpenStack access")) + } + if openstack.Cloud == "" { + allErrs = append(allErrs, field.Required(openstackPath.Child("cloud"), "must specify cloud section of credentials secret to use")) + } + } + if vsphere := platform.VSphere; vsphere != nil { + numberOfPlatforms++ + vspherePath := path.Child("vsphere") + if vsphere.CredentialsSecretRef.Name == "" { + allErrs = append(allErrs, field.Required(vspherePath.Child("credentialsSecretRef", "name"), "must specify secrets for vSphere access")) + } + if vsphere.CertificatesSecretRef.Name == "" { + allErrs = append(allErrs, field.Required(vspherePath.Child("certificatesSecretRef", "name"), "must specify certificates for vSphere access")) + } + if vsphere.VCenter == "" { + allErrs = append(allErrs, field.Required(vspherePath.Child("vCenter"), "must specify vSphere vCenter")) + } + if vsphere.Datacenter == "" { + allErrs = append(allErrs, field.Required(vspherePath.Child("datacenter"), "must specify vSphere datacenter")) + } + if vsphere.DefaultDatastore == "" { + allErrs = append(allErrs, field.Required(vspherePath.Child("defaultDatastore"), "must specify vSphere defaultDatastore")) + } + } + if ovirt := 
platform.Ovirt; ovirt != nil { + numberOfPlatforms++ + ovirtPath := path.Child("ovirt") + if ovirt.CredentialsSecretRef.Name == "" { + allErrs = append(allErrs, field.Required(ovirtPath.Child("credentialsSecretRef", "name"), "must specify secrets for oVirt access")) + } + if ovirt.CertificatesSecretRef.Name == "" { + allErrs = append(allErrs, field.Required(ovirtPath.Child("certificatesSecretRef", "name"), "must specify certificates for oVirt access")) + } + if ovirt.ClusterID == "" { + allErrs = append(allErrs, field.Required(ovirtPath.Child("ovirt_cluster_id"), "must specify ovirt_cluster_id")) + } + if ovirt.StorageDomainID == "" { + allErrs = append(allErrs, field.Required(ovirtPath.Child("ovirt_storage_domain_id"), "must specify ovirt_storage_domain_id")) + } + } + if baremetal := platform.BareMetal; baremetal != nil { + numberOfPlatforms++ + } + if agent := platform.AgentBareMetal; agent != nil { + numberOfPlatforms++ + // TODO: add agent metal platform validation + } + switch { + case numberOfPlatforms == 0: + allErrs = append(allErrs, field.Required(path, "must specify a platform")) + case numberOfPlatforms > 1: + allErrs = append(allErrs, field.Invalid(path, platform, "must specify only a single platform")) + } + return allErrs +} + +func validateCanManageDNSForClusterPlatform(specPath *field.Path, spec hivev1.ClusterDeploymentSpec) field.ErrorList { + allErrs := field.ErrorList{} + canManageDNS := false + if spec.Platform.AWS != nil { + canManageDNS = true + } + if spec.Platform.Azure != nil { + canManageDNS = true + } + if spec.Platform.GCP != nil { + canManageDNS = true + } + if !canManageDNS && spec.ManageDNS { + allErrs = append(allErrs, field.Invalid(specPath.Child("manageDNS"), spec.ManageDNS, "cannot manage DNS for the selected platform")) + } + return allErrs +} + +// validateUpdate specifically validates update operations for ClusterDeployment objects. 
+func (a *ClusterDeploymentValidatingAdmissionHook) validateUpdate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateUpdate", + }) + + if admResp := validatefeatureGates(a.decoder, admissionSpec, a.fs, contextLogger); admResp != nil { + contextLogger.Errorf("object was rejected due to feature gate failures") + return admResp + } + + newObject := &hivev1.ClusterDeployment{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = newObject.Name + + oldObject := &hivev1.ClusterDeployment{} + if err := a.decoder.DecodeRaw(admissionSpec.OldObject, oldObject); err != nil { + contextLogger.Errorf("Failed unmarshaling OldObject: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["oldObject.Name"] = oldObject.Name + + hasChangedImmutableField, changedFieldName := hasChangedImmutableField(&oldObject.Spec, &newObject.Spec) + if hasChangedImmutableField { + message := fmt.Sprintf("Attempted to change ClusterDeployment.Spec.%v. ClusterDeployment.Spec is immutable except for %v", changedFieldName, mutableFields) + contextLogger.Infof("Failed validation: %v", message) + + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + // validate the newly incoming ingress + if ingressValidationResult := validateIngress(newObject, contextLogger); ingressValidationResult != nil { + return ingressValidationResult + } + + // Now catch the case where there was a previously defined list and now it's being emptied + hasClearedOutPreviouslyDefinedIngressList := hasClearedOutPreviouslyDefinedIngressList(&oldObject.Spec, &newObject.Spec) + if hasClearedOutPreviouslyDefinedIngressList { + message := fmt.Sprintf("Previously defined a list of ingress objects, must provide a default ingress object") + contextLogger.Infof("Failed validation: %v", message) + + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + allErrs := field.ErrorList{} + specPath := field.NewPath("spec") + + if newObject.Spec.Installed { + if newObject.Spec.ClusterMetadata != nil { + if oldObject.Spec.Installed { + allErrs = append(allErrs, apivalidation.ValidateImmutableField(newObject.Spec.ClusterMetadata, oldObject.Spec.ClusterMetadata, specPath.Child("clusterMetadata"))...) 
+ } + } else { + allErrs = append(allErrs, field.Required(specPath.Child("clusterMetadata"), "installed cluster must have cluster metadata")) + } + } else { + if oldObject.Spec.Installed { + allErrs = append(allErrs, field.Invalid(specPath.Child("installed"), newObject.Spec.Installed, "cannot make uninstalled once installed")) + } + } + + // Validate the ClusterPoolRef: + switch oldPoolRef, newPoolRef := oldObject.Spec.ClusterPoolRef, newObject.Spec.ClusterPoolRef; { + case oldPoolRef != nil && newPoolRef != nil: + allErrs = append(allErrs, apivalidation.ValidateImmutableField(newPoolRef.Namespace, oldPoolRef.Namespace, specPath.Child("clusterPoolRef", "namespace"))...) + allErrs = append(allErrs, apivalidation.ValidateImmutableField(newPoolRef.PoolName, oldPoolRef.PoolName, specPath.Child("clusterPoolRef", "poolName"))...) + if oldClaim := oldPoolRef.ClaimName; oldClaim != "" { + allErrs = append(allErrs, apivalidation.ValidateImmutableField(newPoolRef.ClaimName, oldClaim, specPath.Child("clusterPoolRef", "claimName"))...) + } + case oldPoolRef != nil && newPoolRef == nil: + allErrs = append(allErrs, field.Invalid(specPath.Child("clusterPoolRef"), newPoolRef, "cannot remove clusterPoolRef")) + case oldPoolRef == nil && newPoolRef != nil: + allErrs = append(allErrs, field.Invalid(specPath.Child("clusterPoolRef"), newPoolRef, "cannot add clusterPoolRef")) + } + + if len(allErrs) > 0 { + contextLogger.WithError(allErrs.ToAggregate()).Info("failed validation") + status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + // If we get here, then all checks passed, so the object is valid. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +// validateDelete specifically validates delete operations for ClusterDeployment objects. +func (a *ClusterDeploymentValidatingAdmissionHook) validateDelete(request *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + logger := log.WithFields(log.Fields{ + "operation": request.Operation, + "group": request.Resource.Group, + "version": request.Resource.Version, + "resource": request.Resource.Resource, + "method": "validateDelete", + }) + + // If running on OpenShift 3.11, OldObject will not be populated. All we can do is accept the DELETE request. 
+ if len(request.OldObject.Raw) == 0 { + logger.Info("Cannot validate the DELETE since OldObject is empty") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } + + oldObject := &hivev1.ClusterDeployment{} + if err := a.decoder.DecodeRaw(request.OldObject, oldObject); err != nil { + logger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + logger.Data["object.Name"] = oldObject.Name + + var allErrs field.ErrorList + + if value, present := oldObject.Annotations[constants.ProtectedDeleteAnnotation]; present { + if enabled, err := strconv.ParseBool(value); enabled && err == nil { + allErrs = append(allErrs, field.Invalid( + field.NewPath("metadata", "annotations", constants.ProtectedDeleteAnnotation), + oldObject.Annotations[constants.ProtectedDeleteAnnotation], + "cannot delete while annotation is present", + )) + } else { + logger.WithField(constants.ProtectedDeleteAnnotation, value).Info("Protected Delete annotation present but not set to true") + } + } + + if len(allErrs) > 0 { + logger.WithError(allErrs.ToAggregate()).Info("failed validation") + status := errors.NewInvalid(schemaGVK(request.Kind).GroupKind(), request.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + logger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +// isFieldMutable says whether the ClusterDeployment.spec field is meant to be mutable or not. +func isFieldMutable(value string) bool { + for _, mutableField := range mutableFields { + if value == mutableField { + return true + } + } + + return false +} + +// hasChangedImmutableField determines if a ClusterDeployment.spec immutable field was changed. +func hasChangedImmutableField(oldObject, newObject *hivev1.ClusterDeploymentSpec) (bool, string) { + ooElem := reflect.ValueOf(oldObject).Elem() + noElem := reflect.ValueOf(newObject).Elem() + + for i := 0; i < ooElem.NumField(); i++ { + ooFieldName := ooElem.Type().Field(i).Name + ooValue := ooElem.Field(i).Interface() + noValue := noElem.Field(i).Interface() + + if !isFieldMutable(ooFieldName) && !reflect.DeepEqual(ooValue, noValue) { + // The field isn't mutable -and- has been changed. DO NOT ALLOW. + return true, ooFieldName + } + } + + return false, "" +} + +func hasClearedOutPreviouslyDefinedIngressList(oldObject, newObject *hivev1.ClusterDeploymentSpec) bool { + // We don't allow a ClusterDeployment which had previously defined a list of Ingress objects + // to then be cleared out. It either must be cleared from the beginning (ie just use default behavior), + // or the ClusterDeployment must continue to define at least the 'default' ingress object. + if len(oldObject.Ingress) > 0 && len(newObject.Ingress) == 0 { + return true + } + + return false +} + +func validateIngressDomainsShareClusterDomain(newObject *hivev1.ClusterDeploymentSpec) bool { + // ingress entries must share the same domain as the cluster + // so watch for an ingress domain ending in: .. 
+ // QuoteMeta prevents dots (or other regex metacharacters) in the cluster
+ // name and base domain from matching arbitrary characters.
+ regexString := fmt.Sprintf(`(?i).*\.%s\.%s$`, regexp.QuoteMeta(newObject.ClusterName), regexp.QuoteMeta(newObject.BaseDomain))
+ sharedSubdomain := regexp.MustCompile(regexString)
+
+ for _, ingress := range newObject.Ingress {
+ if !sharedSubdomain.Match([]byte(ingress.Domain)) {
+ return false
+ }
+ }
+ return true
+}
+
+func validateIngressDomainsNotWildcard(newObject *hivev1.ClusterDeploymentSpec) bool {
+ // check for domains with a leading '*'; the * is unnecessary as the ingress
+ // controller assumes a wildcard (HasPrefix also tolerates an empty domain)
+ for _, ingress := range newObject.Ingress {
+ if strings.HasPrefix(ingress.Domain, "*") {
+ return false
+ }
+ }
+ return true
+}
+
+func validateIngressServingCertificateExists(newObject *hivev1.ClusterDeploymentSpec) bool {
+ // Include the empty string in the set of certs so that an ingress with
+ // an empty serving certificate passes.
+ certs := sets.NewString("")
+ for _, cert := range newObject.CertificateBundles {
+ certs.Insert(cert.Name)
+ }
+ for _, ingress := range newObject.Ingress {
+ if !certs.Has(ingress.ServingCertificate) {
+ return false
+ }
+ }
+ return true
+}
+
+// empty ingress is allowed (for create), but if it's non-zero
+// it must include an entry for 'default'
+func validateIngressList(newObject *hivev1.ClusterDeploymentSpec) bool {
+ if len(newObject.Ingress) == 0 {
+ return true
+ }
+
+ defaultFound := false
+ for _, ingress := range newObject.Ingress {
+ if ingress.Name == "default" {
+ defaultFound = true
+ }
+ }
+ if !defaultFound {
+ return false
+ }
+
+ return true
+}
+
+func validateDomain(domain string, validDomains []string) bool {
+ matchFound := false
+ for _, validDomain := range validDomains {
+ // Do not allow the base domain to be the same as one of the managed domains.
+ if domain == validDomain {
+ return false
+ }
+ dottedValidDomain := "." 
+ validDomain + if !strings.HasSuffix(domain, dottedValidDomain) { + continue + } + childPart := strings.TrimSuffix(domain, dottedValidDomain) + if !strings.ContainsRune(childPart, '.') { + matchFound = true + } + } + return matchFound +} + +func validateIngress(newObject *hivev1.ClusterDeployment, contextLogger *log.Entry) *admissionv1beta1.AdmissionResponse { + if !validateIngressList(&newObject.Spec) { + message := fmt.Sprintf("Ingress list must include a default entry") + contextLogger.Infof("Failed validation: %v", message) + + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + if !validateIngressDomainsNotWildcard(&newObject.Spec) { + message := "Ingress domains must not lead with *" + contextLogger.Infof("Failed validation: %v", message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + if !validateIngressDomainsShareClusterDomain(&newObject.Spec) { + message := "Ingress domains must share the same domain as the cluster" + contextLogger.Infof("Failed validation: %v", message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + if !validateIngressServingCertificateExists(&newObject.Spec) { + message := "Ingress has serving certificate that does not exist in certificate bundle" + contextLogger.Infof("Failed validation: %v", message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + // everything passed + return nil +} + +func validateCertificateBundles(newObject *hivev1.ClusterDeployment, contextLogger *log.Entry) *admissionv1beta1.AdmissionResponse { + for _, certBundle := range newObject.Spec.CertificateBundles { + if certBundle.Name == "" { + message := "Certificate bundle is missing a name" + contextLogger.Infof("Failed validation: %v", message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + if certBundle.CertificateSecretRef.Name == "" { + message := "Certificate bundle is missing a secret reference" + contextLogger.Infof("Failed validation: %v", message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + } + return nil +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/clusterimageset_validating_admission_hook.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/clusterimageset_validating_admission_hook.go new file mode 100644 index 00000000000..23ef24afbe6 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/clusterimageset_validating_admission_hook.go @@ -0,0 +1,233 @@ +package validatingwebhooks + +import ( + "net/http" + "reflect" + + log 
"github.com/sirupsen/logrus" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + hivev1 "github.com/openshift/hive/pkg/apis/hive/v1" +) + +const ( + clusterImageSetGroup = "hive.openshift.io" + clusterImageSetVersion = "v1" + clusterImageSetResource = "clusterimagesets" +) + +// ClusterImageSetValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server. +type ClusterImageSetValidatingAdmissionHook struct { + decoder *admission.Decoder +} + +// NewClusterImageSetValidatingAdmissionHook constructs a new ClusterImageSetValidatingAdmissionHook +func NewClusterImageSetValidatingAdmissionHook(decoder *admission.Decoder) *ClusterImageSetValidatingAdmissionHook { + return &ClusterImageSetValidatingAdmissionHook{decoder: decoder} +} + +// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the +// webhook is accessed by the kube apiserver. +// For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1/clusterimagesetvalidators". +// When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below. +func (a *ClusterImageSetValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) { + log.WithFields(log.Fields{ + "group": "admission.hive.openshift.io", + "version": "v1", + "resource": "clusterimagesetvalidator", + }).Info("Registering validation REST resource") + // NOTE: This GVR is meant to be different than the ClusterImageSet CRD GVR which has group "hive.openshift.io". + return schema.GroupVersionResource{ + Group: "admission.hive.openshift.io", + Version: "v1", + Resource: "clusterimagesetvalidators", + }, + "clusterimagesetvalidator" +} + +// Initialize is called by generic-admission-server on startup to setup any special initialization that your webhook needs. +func (a *ClusterImageSetValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error { + log.WithFields(log.Fields{ + "group": "admission.hive.openshift.io", + "version": "v1", + "resource": "clusterimagesetvalidator", + }).Info("Initializing validation REST resource") + return nil // No initialization needed right now. +} + +// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request. +// Usually it's the kube apiserver that is making the admission validation request. +func (a *ClusterImageSetValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "Validate", + }) + + if !a.shouldValidate(admissionSpec) { + contextLogger.Info("Skipping validation for request") + // The request object isn't something that this validator should validate. + // Therefore, we say that it's allowed. 
+ return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } + + contextLogger.Info("Validating request") + + if admissionSpec.Operation == admissionv1beta1.Create { + return a.validateCreate(admissionSpec) + } + + if admissionSpec.Operation == admissionv1beta1.Update { + return a.validateUpdate(admissionSpec) + } + + // We're only validating creates and updates at this time, so all other operations are explicitly allowed. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +// shouldValidate explicitly checks if the request should validated. For example, this webhook may have accidentally been registered to check +// the validity of some other type of object with a different GVR. +func (a *ClusterImageSetValidatingAdmissionHook) shouldValidate(admissionSpec *admissionv1beta1.AdmissionRequest) bool { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "shouldValidate", + }) + + if admissionSpec.Resource.Group != clusterImageSetGroup { + contextLogger.Debug("Returning False, not our group") + return false + } + + if admissionSpec.Resource.Version != clusterImageSetVersion { + contextLogger.Debug("Returning False, it's our group, but not the right version") + return false + } + + if admissionSpec.Resource.Resource != clusterImageSetResource { + contextLogger.Debug("Returning False, it's our group and version, but not the right resource") + return false + } + + // If we get here, then we're supposed to validate the object. + contextLogger.Debug("Returning True, passed all prerequisites.") + return true +} + +// validateCreate specifically validates create operations for ClusterImageSet objects. +func (a *ClusterImageSetValidatingAdmissionHook) validateCreate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateCreate", + }) + + newObject := &hivev1.ClusterImageSet{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = newObject.Name + + if newObject.Spec.ReleaseImage == "" { + message := "Failed validation: you must specify a release image" + contextLogger.Infof(message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + // If we get here, then all checks passed, so the object is valid. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +// validateUpdate specifically validates update operations for ClusterImageSet objects. 
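+// The spec is treated as fully immutable: any change to it (including
+// spec.releaseImage) is rejected by the reflect.DeepEqual check below.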
+func (a *ClusterImageSetValidatingAdmissionHook) validateUpdate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateUpdate", + }) + + newObject := &hivev1.ClusterImageSet{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = newObject.Name + + oldObject := &hivev1.ClusterImageSet{} + if err := a.decoder.DecodeRaw(admissionSpec.OldObject, oldObject); err != nil { + contextLogger.Errorf("Failed unmarshaling OldObject: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["oldObject.Name"] = oldObject.Name + + if !reflect.DeepEqual(oldObject.Spec, newObject.Spec) { + message := "ClusterImageSet.Spec is immutable" + contextLogger.Infof("Failed validation: %v", message) + + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + // If we get here, then all checks passed, so the object is valid. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/clusterpool_validating_admission_hook.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/clusterpool_validating_admission_hook.go new file mode 100644 index 00000000000..07ceefe3691 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/clusterpool_validating_admission_hook.go @@ -0,0 +1,256 @@ +package validatingwebhooks + +import ( + "fmt" + "net/http" + + log "github.com/sirupsen/logrus" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + hivev1 "github.com/openshift/hive/pkg/apis/hive/v1" +) + +const ( + clusterPoolGroup = "hive.openshift.io" + clusterPoolVersion = "v1" + clusterPoolResource = "clusterpools" + + clusterPoolAdmissionGroup = "admission.hive.openshift.io" + clusterPoolAdmissionVersion = "v1" +) + +// ClusterPoolValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server. 
+type ClusterPoolValidatingAdmissionHook struct { + decoder *admission.Decoder +} + +// NewClusterPoolValidatingAdmissionHook constructs a new ClusterPoolValidatingAdmissionHook +func NewClusterPoolValidatingAdmissionHook(decoder *admission.Decoder) *ClusterPoolValidatingAdmissionHook { + return &ClusterPoolValidatingAdmissionHook{ + decoder: decoder, + } +} + +// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the +// webhook is accessed by the kube apiserver. +// For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1/clusterpoolvalidators". +// When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below. +func (a *ClusterPoolValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) { + log.WithFields(log.Fields{ + "group": clusterPoolAdmissionGroup, + "version": clusterPoolAdmissionVersion, + "resource": "clusterpoolvalidator", + }).Info("Registering validation REST resource") + + // NOTE: This GVR is meant to be different than the ClusterPool CRD GVR which has group "hive.openshift.io". + return schema.GroupVersionResource{ + Group: clusterPoolAdmissionGroup, + Version: clusterPoolAdmissionVersion, + Resource: "clusterpoolvalidators", + }, + "clusterpoolvalidator" +} + +// Initialize is called by generic-admission-server on startup to setup any special initialization that your webhook needs. +func (a *ClusterPoolValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error { + log.WithFields(log.Fields{ + "group": clusterPoolAdmissionGroup, + "version": clusterPoolAdmissionVersion, + "resource": "clusterpoolvalidator", + }).Info("Initializing validation REST resource") + return nil // No initialization needed right now. +} + +// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request. +// Usually it's the kube apiserver that is making the admission validation request. +func (a *ClusterPoolValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "Validate", + }) + + if !a.shouldValidate(admissionSpec) { + contextLogger.Info("Skipping validation for request") + // The request object isn't something that this validator should validate. + // Therefore, we say that it's Allowed. + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } + + contextLogger.Info("Validating request") + + switch admissionSpec.Operation { + case admissionv1beta1.Create: + return a.validateCreate(admissionSpec) + case admissionv1beta1.Update: + return a.validateUpdate(admissionSpec) + default: + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } +} + +// shouldValidate explicitly checks if the request should validated. For example, this webhook may have accidentally been registered to check +// the validity of some other type of object with a different GVR. 
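+// Treat this as defense in depth: webhook registration should already scope
+// requests to the ClusterPool GVR, but a misconfigured registration could
+// route anything here.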
+func (a *ClusterPoolValidatingAdmissionHook) shouldValidate(admissionSpec *admissionv1beta1.AdmissionRequest) bool {
+ contextLogger := log.WithFields(log.Fields{
+ "operation": admissionSpec.Operation,
+ "group": admissionSpec.Resource.Group,
+ "version": admissionSpec.Resource.Version,
+ "resource": admissionSpec.Resource.Resource,
+ "method": "shouldValidate",
+ })
+
+ if admissionSpec.Resource.Group != clusterPoolGroup {
+ contextLogger.Debug("Returning False, not our group")
+ return false
+ }
+
+ if admissionSpec.Resource.Version != clusterPoolVersion {
+ contextLogger.Debug("Returning False, it's our group, but not the right version")
+ return false
+ }
+
+ if admissionSpec.Resource.Resource != clusterPoolResource {
+ contextLogger.Debug("Returning False, it's our group and version, but not the right resource")
+ return false
+ }
+
+ // If we get here, then we're supposed to validate the object.
+ contextLogger.Debug("Returning True, passed all prerequisites.")
+ return true
+}
+
+// validateCreate specifically validates create operations for ClusterPool objects.
+func (a *ClusterPoolValidatingAdmissionHook) validateCreate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
+ contextLogger := log.WithFields(log.Fields{
+ "operation": admissionSpec.Operation,
+ "group": admissionSpec.Resource.Group,
+ "version": admissionSpec.Resource.Version,
+ "resource": admissionSpec.Resource.Resource,
+ "method": "validateCreate",
+ })
+
+ newObject := &hivev1.ClusterPool{}
+ if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil {
+ contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error())
+ return &admissionv1beta1.AdmissionResponse{
+ Allowed: false,
+ Result: &metav1.Status{
+ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
+ Message: err.Error(),
+ },
+ }
+ }
+
+ // Add the new data to the contextLogger
+ contextLogger.Data["object.Name"] = newObject.Name
+
+ // TODO: Put Create Validation Here (or in openAPIV3Schema validation section of crd)
+
+ if len(newObject.Name) > validation.DNS1123LabelMaxLength {
+ message := fmt.Sprintf("Invalid cluster pool name (.meta.name): %s", validation.MaxLenError(validation.DNS1123LabelMaxLength))
+ contextLogger.Error(message)
+ return &admissionv1beta1.AdmissionResponse{
+ Allowed: false,
+ Result: &metav1.Status{
+ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
+ Message: message,
+ },
+ }
+ }
+
+ allErrs := field.ErrorList{}
+ specPath := field.NewPath("spec")
+
+ allErrs = append(allErrs, validateClusterPlatform(specPath, newObject.Spec.Platform)...)
+
+ if len(allErrs) > 0 {
+ status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status()
+ return &admissionv1beta1.AdmissionResponse{
+ Allowed: false,
+ Result: &status,
+ }
+ }
+
+ // If we get here, then all checks passed, so the object is valid.
+ contextLogger.Info("Successful validation")
+ return &admissionv1beta1.AdmissionResponse{
+ Allowed: true,
+ }
+}
+
+// validateUpdate specifically validates update operations for ClusterPool objects.
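+// Updates re-run the same platform validation as create; unlike ClusterImageSet,
+// ClusterPool does not currently enforce spec immutability here.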
+func (a *ClusterPoolValidatingAdmissionHook) validateUpdate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateUpdate", + }) + + newObject := &hivev1.ClusterPool{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = newObject.Name + + oldObject := &hivev1.ClusterPool{} + if err := a.decoder.DecodeRaw(admissionSpec.OldObject, oldObject); err != nil { + contextLogger.Errorf("Failed unmarshaling OldObject: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["oldObject.Name"] = oldObject.Name + + allErrs := field.ErrorList{} + specPath := field.NewPath("spec") + + allErrs = append(allErrs, validateClusterPlatform(specPath, newObject.Spec.Platform)...) + + if len(allErrs) > 0 { + contextLogger.WithError(allErrs.ToAggregate()).Info("failed validation") + status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + // If we get here, then all checks passed, so the object is valid. 
+ contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/clusterprovision_validating_admission_hook.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/clusterprovision_validating_admission_hook.go new file mode 100644 index 00000000000..af8b2f5f89a --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/clusterprovision_validating_admission_hook.go @@ -0,0 +1,330 @@ +package validatingwebhooks + +import ( + "fmt" + "net/http" + + log "github.com/sirupsen/logrus" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + hivev1 "github.com/openshift/hive/pkg/apis/hive/v1" +) + +const ( + clusterProvisionGroup = "hive.openshift.io" + clusterProvisionVersion = "v1" + clusterProvisionResource = "clusterprovisions" +) + +var ( + validProvisionStages = map[hivev1.ClusterProvisionStage]bool{ + hivev1.ClusterProvisionStageInitializing: true, + hivev1.ClusterProvisionStageProvisioning: true, + hivev1.ClusterProvisionStageComplete: true, + hivev1.ClusterProvisionStageFailed: true, + } + + validProvisionStageValues = func() []string { + v := make([]string, 0, len(validProvisionStages)) + for m := range validProvisionStages { + v = append(v, string(m)) + } + return v + }() +) + +// ClusterProvisionValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server. +type ClusterProvisionValidatingAdmissionHook struct { + decoder *admission.Decoder +} + +// NewClusterProvisionValidatingAdmissionHook constructs a new ClusterProvisionValidatingAdmissionHook +func NewClusterProvisionValidatingAdmissionHook(decoder *admission.Decoder) *ClusterProvisionValidatingAdmissionHook { + return &ClusterProvisionValidatingAdmissionHook{decoder: decoder} +} + +// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the +// webhook is accessed by the kube apiserver. +// For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1/clusterprovisionvalidators". +// When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below. +func (a *ClusterProvisionValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) { + log.WithFields(log.Fields{ + "group": "admission.hive.openshift.io", + "version": "v1", + "resource": "clusterprovisionvalidator", + }).Info("Registering validation REST resource") + // NOTE: This GVR is meant to be different than the ClusterProvision CRD GVR which has group "hive.openshift.io". + return schema.GroupVersionResource{ + Group: "admission.hive.openshift.io", + Version: "v1", + Resource: "clusterprovisionvalidators", + }, + "clusterprovisionvalidator" +} + +// Initialize is called by generic-admission-server on startup to setup any special initialization that your webhook needs. 
+func (a *ClusterProvisionValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error { + log.WithFields(log.Fields{ + "group": "admission.hive.openshift.io", + "version": "v1", + "resource": "clusterprovisionvalidator", + }).Info("Initializing validation REST resource") + + return nil // No initialization needed right now. +} + +// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request. +// Usually it's the kube apiserver that is making the admission validation request. +func (a *ClusterProvisionValidatingAdmissionHook) Validate(request *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + logger := log.WithFields(log.Fields{ + "operation": request.Operation, + "group": request.Resource.Group, + "version": request.Resource.Version, + "resource": request.Resource.Resource, + "method": "Validate", + }) + + if !a.shouldValidate(request, logger) { + logger.Info("Skipping validation for request") + // The request object isn't something that this validator should validate. + // Therefore, we say that it's allowed. + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } + + logger.Info("Validating request") + + switch request.Operation { + case admissionv1beta1.Create: + return a.validateCreateRequest(request, logger) + case admissionv1beta1.Update: + return a.validateUpdateRequest(request, logger) + default: + logger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } +} + +// shouldValidate explicitly checks if the request should validated. For example, this webhook may have accidentally been registered to check +// the validity of some other type of object with a different GVR. +func (a *ClusterProvisionValidatingAdmissionHook) shouldValidate(request *admissionv1beta1.AdmissionRequest, logger log.FieldLogger) bool { + logger = logger.WithField("method", "shouldValidate") + + if request.Resource.Group != clusterProvisionGroup { + logger.Debug("Returning False, not our group") + return false + } + + if request.Resource.Version != clusterProvisionVersion { + logger.Debug("Returning False, it's our group, but not the right version") + return false + } + + if request.Resource.Resource != clusterProvisionResource { + logger.Debug("Returning False, it's our group and version, but not the right resource") + return false + } + + // If we get here, then we're supposed to validate the object. + logger.Debug("Returning True, passed all prerequisites.") + return true +} + +// validateCreateRequest specifically validates create operations for ClusterProvision objects. +func (a *ClusterProvisionValidatingAdmissionHook) validateCreateRequest(request *admissionv1beta1.AdmissionRequest, logger log.FieldLogger) *admissionv1beta1.AdmissionResponse { + logger = logger.WithField("method", "validateCreateRequest") + + newObject, resp := a.decode(request.Object, logger.WithField("decode", "Object")) + if resp != nil { + return resp + } + + logger = logger. + WithField("object.Name", newObject.Name). 
+ WithField("object.Namespace", newObject.Namespace) + + if allErrs := validateClusterProvisionCreate(newObject); len(allErrs) > 0 { + logger.WithError(allErrs.ToAggregate()).Info("failed validation") + status := errors.NewInvalid(schemaGVK(request.Kind).GroupKind(), request.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + // If we get here, then all checks passed, so the object is valid. + logger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +// validateUpdateRequest specifically validates update operations for ClusterProvision objects. +func (a *ClusterProvisionValidatingAdmissionHook) validateUpdateRequest(request *admissionv1beta1.AdmissionRequest, logger log.FieldLogger) *admissionv1beta1.AdmissionResponse { + logger = logger.WithField("method", "validateUpdateRequest") + + newObject, resp := a.decode(request.Object, logger.WithField("decode", "Object")) + if resp != nil { + return resp + } + + logger = logger. + WithField("object.Name", newObject.Name). + WithField("object.Namespace", newObject.Namespace) + + oldObject, resp := a.decode(request.OldObject, logger.WithField("decode", "OldObject")) + if resp != nil { + return resp + } + + if allErrs := validateClusterProvisionUpdate(oldObject, newObject); len(allErrs) > 0 { + logger.WithError(allErrs.ToAggregate()).Info("failed validation") + status := errors.NewInvalid(schemaGVK(request.Kind).GroupKind(), request.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + // If we get here, then all checks passed, so the object is valid. + logger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +func (a *ClusterProvisionValidatingAdmissionHook) decode(raw runtime.RawExtension, logger log.FieldLogger) (*hivev1.ClusterProvision, *admissionv1beta1.AdmissionResponse) { + obj := &hivev1.ClusterProvision{} + if err := a.decoder.DecodeRaw(raw, obj); err != nil { + logger.WithError(err).Error("failed to decode") + return nil, &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + return obj, nil +} + +func validateClusterProvisionCreate(provision *hivev1.ClusterProvision) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, validateClusterProvisionSpecInvariants(&provision.Spec, field.NewPath("spec"))...) + return allErrs +} + +func validateClusterProvisionUpdate(old, new *hivev1.ClusterProvision) field.ErrorList { + allErrs := field.ErrorList{} + specPath := field.NewPath("spec") + allErrs = append(allErrs, validateClusterProvisionSpecInvariants(&new.Spec, specPath)...) + allErrs = append(allErrs, validation.ValidateImmutableField(new.Spec.ClusterDeploymentRef.Name, old.Spec.ClusterDeploymentRef.Name, specPath.Child("clusterDeploymentRef", "name"))...) + allErrs = append(allErrs, validation.ValidateImmutableField(new.Spec.PodSpec, old.Spec.PodSpec, specPath.Child("podSpec"))...) + allErrs = append(allErrs, validation.ValidateImmutableField(new.Spec.Attempt, old.Spec.Attempt, specPath.Child("attempt"))...) 
+ if old.Spec.Stage != new.Spec.Stage { + badStageTransition := true + switch old.Spec.Stage { + case hivev1.ClusterProvisionStageInitializing: + badStageTransition = new.Spec.Stage == hivev1.ClusterProvisionStageComplete + case hivev1.ClusterProvisionStageProvisioning: + badStageTransition = new.Spec.Stage == hivev1.ClusterProvisionStageInitializing + } + if badStageTransition { + allErrs = append(allErrs, field.Invalid(specPath.Child("stage"), new.Spec.Stage, fmt.Sprintf("cannot transition from %s to %s", old.Spec.Stage, new.Spec.Stage))) + } + } + if old.Spec.ClusterID != nil { + allErrs = append(allErrs, validation.ValidateImmutableField(new.Spec.ClusterID, old.Spec.ClusterID, specPath.Child("clusterID"))...) + } + if old.Spec.InfraID != nil { + allErrs = append(allErrs, validation.ValidateImmutableField(new.Spec.InfraID, old.Spec.InfraID, specPath.Child("infraID"))...) + } + if old.Spec.AdminKubeconfigSecretRef != nil { + allErrs = append(allErrs, validation.ValidateImmutableField(new.Spec.AdminKubeconfigSecretRef, old.Spec.AdminKubeconfigSecretRef, specPath.Child("adminKubeconfigSecretRef"))...) + } + if old.Spec.AdminPasswordSecretRef != nil { + allErrs = append(allErrs, validation.ValidateImmutableField(new.Spec.AdminPasswordSecretRef, old.Spec.AdminPasswordSecretRef, specPath.Child("adminPasswordSecretRef"))...) + } + allErrs = append(allErrs, validation.ValidateImmutableField(new.Spec.PrevClusterID, old.Spec.PrevClusterID, specPath.Child("prevClusterID"))...) + allErrs = append(allErrs, validation.ValidateImmutableField(new.Spec.PrevInfraID, old.Spec.PrevInfraID, specPath.Child("prevInfraID"))...) + return allErrs +} + +func validateClusterProvisionSpecInvariants(spec *hivev1.ClusterProvisionSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if spec.ClusterDeploymentRef.Name == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("clusterDeploymentRef", "name"), "must have reference to clusterdeployment")) + } + if spec.Attempt < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("attempt"), spec.Attempt, "attempt number must not be negative")) + } + if !validProvisionStages[spec.Stage] { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("stage"), spec.Stage, validProvisionStageValues)) + } + if len(spec.PodSpec.Containers) == 0 { + if spec.Attempt != 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("attempt"), spec.Attempt, "attempt number must not be set for pre-installed cluster")) + } + if spec.Stage != hivev1.ClusterProvisionStageComplete { + allErrs = append(allErrs, field.Invalid(fldPath.Child("stage"), spec.Stage, fmt.Sprintf("stage must be %s for pre-installed cluster", hivev1.ClusterProvisionStageComplete))) + } + if spec.PrevClusterID != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("prevClusterID"), spec.PrevClusterID, "previous cluster ID must not be set for pre-installed cluster")) + } + if spec.PrevInfraID != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("prevInfraID"), spec.PrevInfraID, "previous infra ID must not be set for pre-installed cluster")) + } + } + if spec.Stage == hivev1.ClusterProvisionStageProvisioning || spec.Stage == hivev1.ClusterProvisionStageComplete { + if spec.InfraID == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("infraID"), fmt.Sprintf("infra ID must be set for %s or %s cluster", hivev1.ClusterProvisionStageProvisioning, hivev1.ClusterProvisionStageComplete))) + } + } + if spec.Stage == hivev1.ClusterProvisionStageComplete { 
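+ // A complete provision must reference both admin credential secrets; without
+ // them the resulting cluster would be unusable from Hive's point of view.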
+ if spec.AdminKubeconfigSecretRef == nil {
+ allErrs = append(allErrs, field.Required(fldPath.Child("adminKubeconfigSecretRef"), fmt.Sprintf("admin kubeconfig secret must be set for %s cluster", hivev1.ClusterProvisionStageComplete)))
+ }
+ if spec.AdminPasswordSecretRef == nil {
+ allErrs = append(allErrs, field.Required(fldPath.Child("adminPasswordSecretRef"), fmt.Sprintf("admin password secret must be set for %s cluster", hivev1.ClusterProvisionStageComplete)))
+ }
+ }
+ if spec.ClusterID != nil && *spec.ClusterID == "" {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("clusterID"), spec.ClusterID, "cluster ID must not be an empty string"))
+ }
+ if spec.InfraID != nil && *spec.InfraID == "" {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("infraID"), spec.InfraID, "infra ID must not be an empty string"))
+ }
+ if spec.AdminKubeconfigSecretRef != nil && spec.AdminKubeconfigSecretRef.Name == "" {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("adminKubeconfigSecretRef", "name"), spec.AdminKubeconfigSecretRef.Name, "admin kubeconfig secret must have a non-empty name"))
+ }
+ if spec.AdminPasswordSecretRef != nil && spec.AdminPasswordSecretRef.Name == "" {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("adminPasswordSecretRef", "name"), spec.AdminPasswordSecretRef.Name, "admin password secret must have a non-empty name"))
+ }
+ if spec.PrevClusterID != nil && *spec.PrevClusterID == "" {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("prevClusterID"), spec.PrevClusterID, "previous cluster ID must not be an empty string"))
+ }
+ if spec.PrevInfraID != nil && *spec.PrevInfraID == "" {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("prevInfraID"), spec.PrevInfraID, "previous infra ID must not be an empty string"))
+ }
+ return allErrs
+}
+
+func schemaGVK(gvk metav1.GroupVersionKind) schema.GroupVersionKind {
+ return schema.GroupVersionKind{
+ Group: gvk.Group,
+ Version: gvk.Version,
+ Kind: gvk.Kind,
+ }
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/dnszone_validating_admission_hook.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/dnszone_validating_admission_hook.go new file mode 100644 index 00000000000..22b1affcf15 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/dnszone_validating_admission_hook.go @@ -0,0 +1,236 @@
+package validatingwebhooks
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ log "github.com/sirupsen/logrus"
+
+ admissionv1beta1 "k8s.io/api/admission/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ dnsvalidation "k8s.io/apimachinery/pkg/util/validation"
+ "k8s.io/client-go/rest"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+ hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
+)
+
+const (
+ dnsZoneGroup = "hive.openshift.io"
+ dnsZoneVersion = "v1"
+ dnsZoneResource = "dnszones"
+)
+
+// DNSZoneValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server.
+type DNSZoneValidatingAdmissionHook struct { + decoder *admission.Decoder +} + +// NewDNSZoneValidatingAdmissionHook constructs a new DNSZoneValidatingAdmissionHook +func NewDNSZoneValidatingAdmissionHook(decoder *admission.Decoder) *DNSZoneValidatingAdmissionHook { + return &DNSZoneValidatingAdmissionHook{decoder: decoder} +} + +// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the +// webhook is accessed by the kube apiserver. +// For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1/dnszonevalidators". +// When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below. +func (a *DNSZoneValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) { + log.WithFields(log.Fields{ + "group": "admission.hive.openshift.io", + "version": "v1", + "resource": "dnszonevalidator", + }).Info("Registering validation REST resource") + // NOTE: This GVR is meant to be different than the DNSZone CRD GVR which has group "hive.openshift.io". + return schema.GroupVersionResource{ + Group: "admission.hive.openshift.io", + Version: "v1", + Resource: "dnszonevalidators", + }, + "dnszonevalidator" +} + +// Initialize is called by generic-admission-server on startup to setup any special initialization that your webhook needs. +func (a *DNSZoneValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error { + log.WithFields(log.Fields{ + "group": "admission.hive.openshift.io", + "version": "v1", + "resource": "dnszonevalidator", + }).Info("Initializing validation REST resource") + return nil // No initialization needed right now. +} + +// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request. +// Usually it's the kube apiserver that is making the admission validation request. +func (a *DNSZoneValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "Validate", + }) + + if !a.shouldValidate(admissionSpec) { + contextLogger.Info("Skipping validation for request") + // The request object isn't something that this validator should validate. + // Therefore, we say that it's allowed. + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } + + contextLogger.Info("Validating request") + + if admissionSpec.Operation == admissionv1beta1.Create { + return a.validateCreate(admissionSpec) + } + + if admissionSpec.Operation == admissionv1beta1.Update { + return a.validateUpdate(admissionSpec) + } + + // We're only validating creates and updates at this time, so all other operations are explicitly allowed. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +// shouldValidate explicitly checks if the request should validated. For example, this webhook may have accidentally been registered to check +// the validity of some other type of object with a different GVR. 
+func (a *DNSZoneValidatingAdmissionHook) shouldValidate(admissionSpec *admissionv1beta1.AdmissionRequest) bool { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "shouldValidate", + }) + + if admissionSpec.Resource.Group != dnsZoneGroup { + contextLogger.Debug("Returning False, not our group") + return false + } + + if admissionSpec.Resource.Version != dnsZoneVersion { + contextLogger.Debug("Returning False, it's our group, but not the right version") + return false + } + + if admissionSpec.Resource.Resource != dnsZoneResource { + contextLogger.Debug("Returning False, it's our group and version, but not the right resource") + return false + } + + // If we get here, then we're supposed to validate the object. + contextLogger.Debug("Returning True, passed all prerequisites.") + return true +} + +// validateCreate specifically validates create operations for DNSZone objects. +func (a *DNSZoneValidatingAdmissionHook) validateCreate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateCreate", + }) + + newObject := &hivev1.DNSZone{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = newObject.Name + + strErrs := dnsvalidation.IsDNS1123Subdomain(newObject.Spec.Zone) + if len(strErrs) != 0 { + message := fmt.Sprintf("Failed validation: %v", strings.Join(strErrs, ";")) + contextLogger.Infof(message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + // If we get here, then all checks passed, so the object is valid. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +// validateUpdate specifically validates update operations for DNSZone objects. 
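+// The only update-time rule enforced here is that spec.zone is immutable.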
+func (a *DNSZoneValidatingAdmissionHook) validateUpdate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateUpdate", + }) + + newObject := &hivev1.DNSZone{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = newObject.Name + + oldObject := &hivev1.DNSZone{} + if err := a.decoder.DecodeRaw(admissionSpec.OldObject, oldObject); err != nil { + contextLogger.Errorf("Failed unmarshaling OldObject: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["oldObject.Name"] = oldObject.Name + + if oldObject.Spec.Zone != newObject.Spec.Zone { + message := "DNSZone.Spec.Zone is immutable" + contextLogger.Infof("Failed validation: %v", message) + + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + // If we get here, then all checks passed, so the object is valid. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/feature_gates.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/feature_gates.go new file mode 100644 index 00000000000..82b341649d7 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/feature_gates.go @@ -0,0 +1,60 @@ +package validatingwebhooks + +import ( + "fmt" + "os" + "strings" + + "github.com/stretchr/testify/assert" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + + hivev1 "github.com/openshift/hive/pkg/apis/hive/v1" + "github.com/openshift/hive/pkg/constants" +) + +type featureSet struct { + *hivev1.FeatureGatesEnabled +} + +func (fs *featureSet) IsEnabled(featureGate string) bool { + s := sets.NewString(fs.FeatureGatesEnabled.Enabled...) + return s.Has(featureGate) +} + +func newFeatureSet() *featureSet { + return &featureSet{ + FeatureGatesEnabled: &hivev1.FeatureGatesEnabled{ + Enabled: strings.Split(os.Getenv(constants.HiveFeatureGatesEnabledEnvVar), ","), + }, + } +} + +// existsOnlyWhenFeatureGate ensures that the fieldPath specified in the obj is only set when the featureGate is enabled. +// NOTE: the path to the field cannot include array / slice. 
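+// Illustrative usage (the field path and gate name here are hypothetical, not
+// real Hive gates):
+//
+// fs := newFeatureSet()
+// allErrs := existsOnlyWhenFeatureGate(fs, obj, "spec.someAlphaField", "MyAlphaFeature")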
+func existsOnlyWhenFeatureGate(fs *featureSet, obj *unstructured.Unstructured, fieldPath string, featureGate string) field.ErrorList { + allErrs := field.ErrorList{} + + p := strings.Split(fieldPath, ".") + _, found, err := unstructured.NestedFieldNoCopy(obj.Object, p...) + if err == nil && found && !fs.IsEnabled(featureGate) { + return append(allErrs, field.Forbidden(field.NewPath(fieldPath), fmt.Sprintf("should only be set when feature gate %s is enabled", featureGate))) + } + return allErrs +} + +// equalOnlyWhenFeatureGate ensures that the fieldPath specified in the obj is equal to the expected value when +// the featureGate is enabled. +// NOTE: the path to the field cannot include array / slice. +func equalOnlyWhenFeatureGate(fs *featureSet, obj *unstructured.Unstructured, fieldPath string, featureGate string, expected interface{}) field.ErrorList { + allErrs := field.ErrorList{} + + p := strings.Split(fieldPath, ".") + v, found, err := unstructured.NestedFieldNoCopy(obj.Object, p...) + if err == nil && found && assert.ObjectsAreEqualValues(expected, v) && !fs.IsEnabled(featureGate) { + return append(allErrs, field.NotSupported(field.NewPath(fieldPath), v, nil)) + } + return allErrs +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/machinepool_validating_admission_hook.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/machinepool_validating_admission_hook.go new file mode 100644 index 00000000000..cf63582e686 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/machinepool_validating_admission_hook.go @@ -0,0 +1,412 @@ +package validatingwebhooks + +import ( + "fmt" + "net/http" + + log "github.com/sirupsen/logrus" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metavalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + hivev1 "github.com/openshift/hive/pkg/apis/hive/v1" + hivev1aws "github.com/openshift/hive/pkg/apis/hive/v1/aws" + hivev1azure "github.com/openshift/hive/pkg/apis/hive/v1/azure" + hivev1gcp "github.com/openshift/hive/pkg/apis/hive/v1/gcp" + hivev1openstack "github.com/openshift/hive/pkg/apis/hive/v1/openstack" + hivev1ovirt "github.com/openshift/hive/pkg/apis/hive/v1/ovirt" + hivev1vsphere "github.com/openshift/hive/pkg/apis/hive/v1/vsphere" +) + +const ( + machinePoolGroup = "hive.openshift.io" + machinePoolVersion = "v1" + machinePoolResource = "machinepools" + + defaultMasterPoolName = "master" + defaultWorkerPoolName = "worker" + legacyWorkerPoolName = "w" +) + +// MachinePoolValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server. +type MachinePoolValidatingAdmissionHook struct { + decoder *admission.Decoder +} + +// NewMachinePoolValidatingAdmissionHook constructs a new MachinePoolValidatingAdmissionHook +func NewMachinePoolValidatingAdmissionHook(decoder *admission.Decoder) *MachinePoolValidatingAdmissionHook { + return &MachinePoolValidatingAdmissionHook{decoder: decoder} +} + +// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the +// webhook is accessed by the kube apiserver. 
+// For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1/machinePoolvalidators". +// When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below. +func (a *MachinePoolValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) { + log.WithFields(log.Fields{ + "group": "admission.hive.openshift.io", + "version": "v1", + "resource": "machinepoolvalidator", + }).Info("Registering validation REST resource") + // NOTE: This GVR is meant to be different than the MachinePool CRD GVR which has group "hive.openshift.io". + return schema.GroupVersionResource{ + Group: "admission.hive.openshift.io", + Version: "v1", + Resource: "machinepoolvalidators", + }, + "machinepoolvalidator" +} + +// Initialize is called by generic-admission-server on startup to setup any special initialization that your webhook needs. +func (a *MachinePoolValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error { + log.WithFields(log.Fields{ + "group": "admission.hive.openshift.io", + "version": "v1", + "resource": "machinepoolvalidator", + }).Info("Initializing validation REST resource") + + return nil // No initialization needed right now. +} + +// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request. +// Usually it's the kube apiserver that is making the admission validation request. +func (a *MachinePoolValidatingAdmissionHook) Validate(request *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + logger := log.WithFields(log.Fields{ + "operation": request.Operation, + "group": request.Resource.Group, + "version": request.Resource.Version, + "resource": request.Resource.Resource, + "method": "Validate", + }) + + if !a.shouldValidate(request, logger) { + logger.Info("Skipping validation for request") + // The request object isn't something that this validator should validate. + // Therefore, we say that it's allowed. + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } + + logger.Info("Validating request") + + switch request.Operation { + case admissionv1beta1.Create: + return a.validateCreateRequest(request, logger) + case admissionv1beta1.Update: + return a.validateUpdateRequest(request, logger) + default: + logger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } +} + +// shouldValidate explicitly checks if the request should validated. For example, this webhook may have accidentally been registered to check +// the validity of some other type of object with a different GVR. +func (a *MachinePoolValidatingAdmissionHook) shouldValidate(request *admissionv1beta1.AdmissionRequest, logger log.FieldLogger) bool { + logger = logger.WithField("method", "shouldValidate") + + if request.Resource.Group != machinePoolGroup { + logger.Debug("Returning False, not our group") + return false + } + + if request.Resource.Version != machinePoolVersion { + logger.Debug("Returning False, it's our group, but not the right version") + return false + } + + if request.Resource.Resource != machinePoolResource { + logger.Debug("Returning False, it's our group and version, but not the right resource") + return false + } + + // If we get here, then we're supposed to validate the object. 
+ logger.Debug("Returning True, passed all prerequisites.") + return true +} + +// validateCreateRequest specifically validates create operations for MachinePool objects. +func (a *MachinePoolValidatingAdmissionHook) validateCreateRequest(request *admissionv1beta1.AdmissionRequest, logger log.FieldLogger) *admissionv1beta1.AdmissionResponse { + logger = logger.WithField("method", "validateCreateRequest") + + newObject, resp := a.decode(request.Object, logger.WithField("decode", "Object")) + if resp != nil { + return resp + } + + logger = logger. + WithField("object.Name", newObject.Name). + WithField("object.Namespace", newObject.Namespace) + + if allErrs := validateMachinePoolCreate(newObject); len(allErrs) > 0 { + logger.WithError(allErrs.ToAggregate()).Info("failed validation") + status := errors.NewInvalid(schemaGVK(request.Kind).GroupKind(), request.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + // If we get here, then all checks passed, so the object is valid. + logger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +// validateUpdateRequest specifically validates update operations for MachinePool objects. +func (a *MachinePoolValidatingAdmissionHook) validateUpdateRequest(request *admissionv1beta1.AdmissionRequest, logger log.FieldLogger) *admissionv1beta1.AdmissionResponse { + logger = logger.WithField("method", "validateUpdateRequest") + + newObject, resp := a.decode(request.Object, logger.WithField("decode", "Object")) + if resp != nil { + return resp + } + + logger = logger. + WithField("object.Name", newObject.Name). + WithField("object.Namespace", newObject.Namespace) + + oldObject, resp := a.decode(request.OldObject, logger.WithField("decode", "OldObject")) + if resp != nil { + return resp + } + + if allErrs := validateMachinePoolUpdate(oldObject, newObject); len(allErrs) > 0 { + logger.WithError(allErrs.ToAggregate()).Info("failed validation") + status := errors.NewInvalid(schemaGVK(request.Kind).GroupKind(), request.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + // If we get here, then all checks passed, so the object is valid. + logger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +func (a *MachinePoolValidatingAdmissionHook) decode(raw runtime.RawExtension, logger log.FieldLogger) (*hivev1.MachinePool, *admissionv1beta1.AdmissionResponse) { + obj := &hivev1.MachinePool{} + if err := a.decoder.DecodeRaw(raw, obj); err != nil { + logger.WithError(err).Error("failed to decode") + return nil, &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + return obj, nil +} + +func validateMachinePoolCreate(pool *hivev1.MachinePool) field.ErrorList { + return validateMachinePoolInvariants(pool) +} + +func validateMachinePoolUpdate(old, new *hivev1.MachinePool) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, validateMachinePoolInvariants(new)...) + specPath := field.NewPath("spec") + allErrs = append(allErrs, validation.ValidateImmutableField(new.Spec.ClusterDeploymentRef, old.Spec.ClusterDeploymentRef, specPath.Child("clusterDeploymentRef"))...) 
+ allErrs = append(allErrs, validation.ValidateImmutableField(new.Spec.Name, old.Spec.Name, specPath.Child("name"))...) + allErrs = append(allErrs, validation.ValidateImmutableField(new.Spec.Platform, old.Spec.Platform, specPath.Child("platform"))...) + return allErrs +} + +func validateMachinePoolName(pool *hivev1.MachinePool) field.ErrorList { + allErrs := field.ErrorList{} + if pool.Name != fmt.Sprintf("%s-%s", pool.Spec.ClusterDeploymentRef.Name, pool.Spec.Name) { + allErrs = append(allErrs, field.Invalid(field.NewPath("metadata", "name"), pool.Name, "name must be ${CD_NAME}-${POOL_NAME}, where ${CD_NAME} is the name of the clusterdeployment and ${POOL_NAME} is the name of the remote machine pool")) + } + for _, invalidName := range []string{defaultMasterPoolName, legacyWorkerPoolName} { + if pool.Spec.Name == invalidName { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "name"), pool.Spec.Name, fmt.Sprintf("pool name cannot be %q", invalidName))) + } + } + return allErrs +} + +func validateMachinePoolInvariants(pool *hivev1.MachinePool) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, validateMachinePoolName(pool)...) + allErrs = append(allErrs, validateMachinePoolSpecInvariants(&pool.Spec, field.NewPath("spec"))...) + return allErrs +} + +func validateMachinePoolSpecInvariants(spec *hivev1.MachinePoolSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if spec.ClusterDeploymentRef.Name == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("clusterDeploymentRef", "name"), "must have reference to clusterdeployment")) + } + if spec.Name == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "must have a name for the remote machine pool")) + } + if spec.Replicas != nil { + if spec.Autoscaling != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("replicas"), *spec.Replicas, "replicas must not be specified when autoscaling is specified")) + } + if *spec.Replicas < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("replicas"), *spec.Replicas, "replicas count must not be negative")) + } + } + platformPath := fldPath.Child("platform") + platforms := []string{} + numberOfMachineSets := 0 + if p := spec.Platform.AWS; p != nil { + platforms = append(platforms, "aws") + allErrs = append(allErrs, validateAWSMachinePoolPlatformInvariants(p, platformPath.Child("aws"))...) + numberOfMachineSets = len(p.Zones) + } + if p := spec.Platform.Azure; p != nil { + platforms = append(platforms, "azure") + allErrs = append(allErrs, validateAzureMachinePoolPlatformInvariants(p, platformPath.Child("azure"))...) + numberOfMachineSets = len(p.Zones) + } + if p := spec.Platform.GCP; p != nil { + platforms = append(platforms, "gcp") + allErrs = append(allErrs, validateGCPMachinePoolPlatformInvariants(p, platformPath.Child("gcp"))...) + numberOfMachineSets = len(p.Zones) + } + if p := spec.Platform.OpenStack; p != nil { + platforms = append(platforms, "openstack") + allErrs = append(allErrs, validateOpenStackMachinePoolPlatformInvariants(p, platformPath.Child("openstack"))...) + } + if p := spec.Platform.VSphere; p != nil { + platforms = append(platforms, "vsphere") + allErrs = append(allErrs, validateVSphereMachinePoolPlatformInvariants(p, platformPath.Child("vsphere"))...) + } + if p := spec.Platform.Ovirt; p != nil { + platforms = append(platforms, "ovirt") + allErrs = append(allErrs, validateOvirtMachinePoolPlatformInvariants(p, platformPath.Child("ovirt"))...) 
+	}
+
+	switch len(platforms) {
+	case 0:
+		allErrs = append(allErrs, field.Required(platformPath, "must specify a platform"))
+	case 1:
+		// valid
+	default:
+		allErrs = append(allErrs, field.Invalid(platformPath, spec.Platform, fmt.Sprintf("multiple platforms specified: %s", platforms)))
+	}
+	if spec.Autoscaling != nil {
+		autoscalingPath := fldPath.Child("autoscaling")
+		if numberOfMachineSets == 0 {
+			if spec.Autoscaling.MinReplicas < 1 {
+				allErrs = append(allErrs, field.Invalid(autoscalingPath.Child("minReplicas"), spec.Autoscaling.MinReplicas, "minimum replicas must be at least 1"))
+			}
+		} else {
+			if spec.Autoscaling.MinReplicas < int32(numberOfMachineSets) {
+				allErrs = append(allErrs, field.Invalid(autoscalingPath.Child("minReplicas"), spec.Autoscaling.MinReplicas, "minimum replicas must be at least the number of zones"))
+			}
+		}
+		if spec.Autoscaling.MinReplicas > spec.Autoscaling.MaxReplicas {
+			allErrs = append(allErrs, field.Invalid(autoscalingPath.Child("minReplicas"), spec.Autoscaling.MinReplicas, "minimum replicas must not be greater than maximum replicas"))
+		}
+	}
+	allErrs = append(allErrs, metavalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...)
+	return allErrs
+}
+
+func validateAWSMachinePoolPlatformInvariants(platform *hivev1aws.MachinePoolPlatform, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for i, zone := range platform.Zones {
+		if zone == "" {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("zones").Index(i), zone, "zone cannot be an empty string"))
+		}
+	}
+	if platform.InstanceType == "" {
+		allErrs = append(allErrs, field.Required(fldPath.Child("instanceType"), "instance type is required"))
+	}
+	rootVolume := &platform.EC2RootVolume
+	rootVolumePath := fldPath.Child("ec2RootVolume")
+	if rootVolume.IOPS < 0 {
+		allErrs = append(allErrs, field.Invalid(rootVolumePath.Child("iops"), rootVolume.IOPS, "volume IOPS must not be negative"))
+	}
+	if rootVolume.Size < 0 {
+		allErrs = append(allErrs, field.Invalid(rootVolumePath.Child("size"), rootVolume.Size, "volume size must not be negative"))
+	}
+	if rootVolume.Type == "" {
+		allErrs = append(allErrs, field.Required(rootVolumePath.Child("type"), "volume type is required"))
+	}
+	return allErrs
+}
+
+func validateGCPMachinePoolPlatformInvariants(platform *hivev1gcp.MachinePool, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for i, zone := range platform.Zones {
+		if zone == "" {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("zones").Index(i), zone, "zone cannot be an empty string"))
+		}
+	}
+	if platform.InstanceType == "" {
+		allErrs = append(allErrs, field.Required(fldPath.Child("instanceType"), "instance type is required"))
+	}
+	return allErrs
+}
+
+func validateAzureMachinePoolPlatformInvariants(platform *hivev1azure.MachinePool, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for i, zone := range platform.Zones {
+		if zone == "" {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("zones").Index(i), zone, "zone cannot be an empty string"))
+		}
+	}
+	if platform.InstanceType == "" {
+		allErrs = append(allErrs, field.Required(fldPath.Child("instanceType"), "instance type is required"))
+	}
+	osDisk := &platform.OSDisk
+	osDiskPath := fldPath.Child("osDisk")
+	if osDisk.DiskSizeGB <= 0 {
+		allErrs = append(allErrs, field.Invalid(osDiskPath.Child("diskSizeGB"), osDisk.DiskSizeGB, "disk size must be positive"))
+	}
+	return allErrs
+}
+
+func validateOpenStackMachinePoolPlatformInvariants(platform *hivev1openstack.MachinePool, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if platform.Flavor == "" {
+		allErrs = append(allErrs, field.Required(fldPath.Child("flavor"), "flavor name is required"))
+	}
+	return allErrs
+}
+
+func validateVSphereMachinePoolPlatformInvariants(platform *hivev1vsphere.MachinePool, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if platform.NumCPUs <= 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("numCPUs"), "number of cpus must be positive"))
+	}
+
+	if platform.NumCoresPerSocket <= 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("numCoresPerSocket"), "number of cores per socket must be positive"))
+	}
+
+	if platform.MemoryMiB <= 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("memoryMiB"), "memory must be positive"))
+	}
+
+	if platform.OSDisk.DiskSizeGB <= 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("diskSizeGB"), "disk size must be positive"))
+	}
+
+	return allErrs
+}
+
+func validateOvirtMachinePoolPlatformInvariants(platform *hivev1ovirt.MachinePool, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	return allErrs
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/selector_syncset_validating_admission_hook.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/selector_syncset_validating_admission_hook.go
new file mode 100644
index 00000000000..1ed8e0fddf3
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/selector_syncset_validating_admission_hook.go
@@ -0,0 +1,224 @@
+package validatingwebhooks
+
+import (
+	"net/http"
+
+	log "github.com/sirupsen/logrus"
+
+	admissionv1beta1 "k8s.io/api/admission/v1beta1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
+)
+
+const (
+	selectorSyncSetGroup    = "hive.openshift.io"
+	selectorSyncSetVersion  = "v1"
+	selectorSyncSetResource = "selectorsyncsets"
+)
+
+// SelectorSyncSetValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server.
+type SelectorSyncSetValidatingAdmissionHook struct {
+	decoder *admission.Decoder
+}
+
+// NewSelectorSyncSetValidatingAdmissionHook constructs a new SelectorSyncSetValidatingAdmissionHook
+func NewSelectorSyncSetValidatingAdmissionHook(decoder *admission.Decoder) *SelectorSyncSetValidatingAdmissionHook {
+	return &SelectorSyncSetValidatingAdmissionHook{decoder: decoder}
+}
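All three hooks in this vendored package implement the same ValidatingResource / Initialize / Validate contract, so a server binary wires them up uniformly. A sketch of that wiring, assuming generic-admission-server's cmd.RunAdmissionServer entry point and the two-value admission.NewDecoder of controller-runtime from this era; the main-package layout is illustrative and not part of this change:

```go
package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

	"github.com/openshift/generic-admission-server/pkg/cmd"

	hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
	validatingwebhooks "github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks"
)

func main() {
	scheme := runtime.NewScheme()
	// Assumption: hivev1 exposes the conventional AddToScheme helper.
	if err := hivev1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	decoder, err := admission.NewDecoder(scheme)
	if err != nil {
		panic(err)
	}
	// Each hook registers its own REST resource under admission.hive.openshift.io/v1.
	cmd.RunAdmissionServer(
		validatingwebhooks.NewMachinePoolValidatingAdmissionHook(decoder),
		validatingwebhooks.NewSelectorSyncSetValidatingAdmissionHook(decoder),
		validatingwebhooks.NewSyncSetValidatingAdmissionHook(decoder),
	)
}
```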
+// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the
+// webhook is accessed by the kube apiserver.
+// For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1/selectorsyncsetvalidators".
+// When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below.
+func (a *SelectorSyncSetValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) {
+	log.WithFields(log.Fields{
+		"group":    "admission.hive.openshift.io",
+		"version":  "v1",
+		"resource": "selectorsyncsetvalidator",
+	}).Info("Registering validation REST resource")
+	// NOTE: This GVR is meant to be different from the SelectorSyncSet CRD GVR, which has group "hive.openshift.io".
+	return schema.GroupVersionResource{
+			Group:    "admission.hive.openshift.io",
+			Version:  "v1",
+			Resource: "selectorsyncsetvalidators",
+		},
+		"selectorsyncsetvalidator"
+}
+
+// Initialize is called by generic-admission-server on startup to set up any special initialization that your webhook needs.
+func (a *SelectorSyncSetValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error {
+	log.WithFields(log.Fields{
+		"group":    "admission.hive.openshift.io",
+		"version":  "v1",
+		"resource": "selectorsyncsetvalidator",
+	}).Info("Initializing validation REST resource")
+	return nil // No initialization needed right now.
+}
+
+// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request.
+// Usually it's the kube apiserver that is making the admission validation request.
+func (a *SelectorSyncSetValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
+	contextLogger := log.WithFields(log.Fields{
+		"operation": admissionSpec.Operation,
+		"group":     admissionSpec.Resource.Group,
+		"version":   admissionSpec.Resource.Version,
+		"resource":  admissionSpec.Resource.Resource,
+		"method":    "Validate",
+	})
+
+	if !a.shouldValidate(admissionSpec) {
+		contextLogger.Info("Skipping validation for request")
+		// The request object isn't something that this validator should validate.
+		// Therefore, we say that it's allowed.
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: true,
+		}
+	}
+
+	contextLogger.Info("Validating request")
+
+	if admissionSpec.Operation == admissionv1beta1.Create {
+		return a.validateCreate(admissionSpec)
+	}
+
+	if admissionSpec.Operation == admissionv1beta1.Update {
+		return a.validateUpdate(admissionSpec)
+	}
+
+	// We're only validating creates and updates at this time, so all other operations are explicitly allowed.
+	contextLogger.Info("Successful validation")
+	return &admissionv1beta1.AdmissionResponse{
+		Allowed: true,
+	}
+}
+
+// shouldValidate explicitly checks if the request should be validated. For example, this webhook may have accidentally been registered to check
+// the validity of some other type of object with a different GVR.
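As a concrete illustration of the GVR guard documented above (the function itself follows this sketch), a request for an unrelated resource is allowed through without any object-level checks; the request literal below is minimal and illustrative:

```go
req := &admissionv1beta1.AdmissionRequest{
	Operation: admissionv1beta1.Create,
	Resource: metav1.GroupVersionResource{
		Group:    "apps", // not "hive.openshift.io"
		Version:  "v1",
		Resource: "deployments",
	},
}
// shouldValidate(req) returns false, so Validate() responds Allowed: true
// without ever decoding the object.
```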
+func (a *SelectorSyncSetValidatingAdmissionHook) shouldValidate(admissionSpec *admissionv1beta1.AdmissionRequest) bool { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "shouldValidate", + }) + + if admissionSpec.Resource.Group != selectorSyncSetGroup { + contextLogger.Debug("Returning False, not our group") + return false + } + + if admissionSpec.Resource.Version != selectorSyncSetVersion { + contextLogger.Debug("Returning False, it's our group, but not the right version") + return false + } + + if admissionSpec.Resource.Resource != selectorSyncSetResource { + contextLogger.Debug("Returning False, it's our group and version, but not the right resource") + return false + } + + // If we get here, then we're supposed to validate the object. + contextLogger.Debug("Returning True, passed all prerequisites.") + return true +} + +// validateCreate specifically validates create operations for SelectorSyncSet objects. +func (a *SelectorSyncSetValidatingAdmissionHook) validateCreate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateCreate", + }) + + newObject := &hivev1.SelectorSyncSet{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = newObject.Name + + allErrs := field.ErrorList{} + allErrs = append(allErrs, validateResources(newObject.Spec.Resources, field.NewPath("spec").Child("resources"))...) + allErrs = append(allErrs, validatePatches(newObject.Spec.Patches, field.NewPath("spec").Child("patches"))...) + allErrs = append(allErrs, validateSecrets(newObject.Spec.Secrets, field.NewPath("spec").Child("secretMappings"))...) + allErrs = append(allErrs, validateResourceApplyMode(newObject.Spec.ResourceApplyMode, field.NewPath("spec", "resourceApplyMode"))...) + + if len(allErrs) > 0 { + statusError := errors.NewInvalid(newObject.GroupVersionKind().GroupKind(), newObject.Name, allErrs).Status() + contextLogger.Infof(statusError.Message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &statusError, + } + } + + // If we get here, then all checks passed, so the object is valid. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +// validateUpdate specifically validates update operations for SelectorSyncSet objects. 
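The validateUpdate handler below mirrors validateCreate, and both lean on helpers shared with the SyncSet hook (validatePatches, validateResourceApplyMode, and friends, which appear later in this diff). A sketch of how validatePatches flags an unsupported patch type; the SyncObjectPatch field names are my assumption about the hive v1 API, and the values are illustrative:

```go
patches := []hivev1.SyncObjectPatch{{
	APIVersion: "v1",
	Kind:       "ConfigMap",
	Name:       "example-config",
	Namespace:  "openshift-config",
	Patch:      `{"data":{"key":"value"}}`,
	PatchType:  "jsonmerge", // invalid: must be "json", "merge", or "strategic"
}}
errs := validatePatches(patches, field.NewPath("spec", "patches"))
// errs contains a single NotSupported error at spec.patches[0].PatchType.
```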
+func (a *SelectorSyncSetValidatingAdmissionHook) validateUpdate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateUpdate", + }) + + newObject := &hivev1.SelectorSyncSet{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = newObject.Name + + allErrs := field.ErrorList{} + allErrs = append(allErrs, validateResources(newObject.Spec.Resources, field.NewPath("spec", "resources"))...) + allErrs = append(allErrs, validatePatches(newObject.Spec.Patches, field.NewPath("spec", "patches"))...) + allErrs = append(allErrs, validateSecrets(newObject.Spec.Secrets, field.NewPath("spec", "secretMappings"))...) + allErrs = append(allErrs, validateResourceApplyMode(newObject.Spec.ResourceApplyMode, field.NewPath("spec", "resourceApplyMode"))...) + + if len(allErrs) > 0 { + statusError := errors.NewInvalid(newObject.GroupVersionKind().GroupKind(), newObject.Name, allErrs).Status() + contextLogger.Infof(statusError.Message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &statusError, + } + } + + // If we get here, then all checks passed, so the object is valid. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/syncset_validating_admission_hook.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/syncset_validating_admission_hook.go new file mode 100644 index 00000000000..d172033e5fb --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks/syncset_validating_admission_hook.go @@ -0,0 +1,336 @@ +package validatingwebhooks + +import ( + "encoding/json" + "net/http" + + log "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + hivev1 "github.com/openshift/hive/pkg/apis/hive/v1" +) + +const ( + syncSetGroup = "hive.openshift.io" + syncSetVersion = "v1" + syncSetResource = "syncsets" +) + +var invalidResourceGroupKinds = map[string]map[string]bool{ + "authorization.openshift.io": { + "Role": true, + "RoleBinding": true, + "ClusterRole": true, + "ClusterRoleBinding": true, + "SubjectAccessReview": true, + }, +} + +var validPatchTypes = map[string]bool{ + "json": true, + "merge": true, + "strategic": true, +} + +var validPatchTypeSlice = []string{"json", "merge", "strategic"} + +var ( + validResourceApplyModes = map[hivev1.SyncSetResourceApplyMode]bool{ + hivev1.UpsertResourceApplyMode: true, + hivev1.SyncResourceApplyMode: true, + } + + 
	validResourceApplyModeSlice = func() []string {
+		v := make([]string, 0, len(validResourceApplyModes))
+		for m := range validResourceApplyModes {
+			v = append(v, string(m))
+		}
+		return v
+	}()
+)
+
+// SyncSetValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server.
+type SyncSetValidatingAdmissionHook struct {
+	decoder *admission.Decoder
+}
+
+// NewSyncSetValidatingAdmissionHook constructs a new SyncSetValidatingAdmissionHook
+func NewSyncSetValidatingAdmissionHook(decoder *admission.Decoder) *SyncSetValidatingAdmissionHook {
+	return &SyncSetValidatingAdmissionHook{decoder: decoder}
+}
+
+// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the
+// webhook is accessed by the kube apiserver.
+// For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1/syncsetvalidators".
+// When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below.
+func (a *SyncSetValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) {
+	log.WithFields(log.Fields{
+		"group":    "admission.hive.openshift.io",
+		"version":  "v1",
+		"resource": "syncsetvalidator",
+	}).Info("Registering validation REST resource")
+	// NOTE: This GVR is meant to be different from the SyncSet CRD GVR, which has group "hive.openshift.io".
+	return schema.GroupVersionResource{
+			Group:    "admission.hive.openshift.io",
+			Version:  "v1",
+			Resource: "syncsetvalidators",
+		},
+		"syncsetvalidator"
+}
+
+// Initialize is called by generic-admission-server on startup to set up any special initialization that your webhook needs.
+func (a *SyncSetValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error {
+	log.WithFields(log.Fields{
+		"group":    "admission.hive.openshift.io",
+		"version":  "v1",
+		"resource": "syncsetvalidator",
+	}).Info("Initializing validation REST resource")
+	return nil // No initialization needed right now.
+}
+
+// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request.
+// Usually it's the kube apiserver that is making the admission validation request.
+func (a *SyncSetValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
+	contextLogger := log.WithFields(log.Fields{
+		"operation": admissionSpec.Operation,
+		"group":     admissionSpec.Resource.Group,
+		"version":   admissionSpec.Resource.Version,
+		"resource":  admissionSpec.Resource.Resource,
+		"method":    "Validate",
+	})
+
+	if !a.shouldValidate(admissionSpec) {
+		contextLogger.Info("Skipping validation for request")
+		// The request object isn't something that this validator should validate.
+		// Therefore, we say that it's allowed.
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: true,
+		}
+	}
+
+	contextLogger.Info("Validating request")
+
+	if admissionSpec.Operation == admissionv1beta1.Create {
+		return a.validateCreate(admissionSpec)
+	}
+
+	if admissionSpec.Operation == admissionv1beta1.Update {
+		return a.validateUpdate(admissionSpec)
+	}
+
+	// We're only validating creates and updates at this time, so all other operations are explicitly allowed.
+	contextLogger.Info("Successful validation")
+	return &admissionv1beta1.AdmissionResponse{
+		Allowed: true,
+	}
+}
+
+// shouldValidate explicitly checks if the request should be validated. For example, this webhook may have accidentally been registered to check
+// the validity of some other type of object with a different GVR.
+func (a *SyncSetValidatingAdmissionHook) shouldValidate(admissionSpec *admissionv1beta1.AdmissionRequest) bool {
+	contextLogger := log.WithFields(log.Fields{
+		"operation": admissionSpec.Operation,
+		"group":     admissionSpec.Resource.Group,
+		"version":   admissionSpec.Resource.Version,
+		"resource":  admissionSpec.Resource.Resource,
+		"method":    "shouldValidate",
+	})
+
+	if admissionSpec.Resource.Group != syncSetGroup {
+		contextLogger.Debug("Returning False, not our group")
+		return false
+	}
+
+	if admissionSpec.Resource.Version != syncSetVersion {
+		contextLogger.Debug("Returning False, it's our group, but not the right version")
+		return false
+	}
+
+	if admissionSpec.Resource.Resource != syncSetResource {
+		contextLogger.Debug("Returning False, it's our group and version, but not the right resource")
+		return false
+	}
+
+	// If we get here, then we're supposed to validate the object.
+	contextLogger.Debug("Returning True, passed all prerequisites.")
+	return true
+}
+
+// validateCreate specifically validates create operations for SyncSet objects.
+func (a *SyncSetValidatingAdmissionHook) validateCreate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
+	contextLogger := log.WithFields(log.Fields{
+		"operation": admissionSpec.Operation,
+		"group":     admissionSpec.Resource.Group,
+		"version":   admissionSpec.Resource.Version,
+		"resource":  admissionSpec.Resource.Resource,
+		"method":    "validateCreate",
+	})
+
+	newObject := &hivev1.SyncSet{}
+	if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil {
+		contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error())
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: false,
+			Result: &metav1.Status{
+				Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
+				Message: err.Error(),
+			},
+		}
+	}
+
+	// Add the new data to the contextLogger
+	contextLogger.Data["object.Name"] = newObject.Name
+
+	allErrs := field.ErrorList{}
+	allErrs = append(allErrs, validateResources(newObject.Spec.Resources, field.NewPath("spec").Child("resources"))...)
+	allErrs = append(allErrs, validatePatches(newObject.Spec.Patches, field.NewPath("spec").Child("patches"))...)
+	allErrs = append(allErrs, validateSecrets(newObject.Spec.Secrets, field.NewPath("spec").Child("secretMappings"))...)
+	allErrs = append(allErrs, validateSourceSecretInSyncSetNamespace(newObject.Spec.Secrets, newObject.Namespace, field.NewPath("spec", "secretMappings"))...)
+	allErrs = append(allErrs, validateResourceApplyMode(newObject.Spec.ResourceApplyMode, field.NewPath("spec", "resourceApplyMode"))...)
+
+	if len(allErrs) > 0 {
+		statusError := errors.NewInvalid(newObject.GroupVersionKind().GroupKind(), newObject.Name, allErrs).Status()
+		contextLogger.Infof(statusError.Message)
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: false,
+			Result:  &statusError,
+		}
+	}
+
+	// If we get here, then all checks passed, so the object is valid.
+	contextLogger.Info("Successful validation")
+	return &admissionv1beta1.AdmissionResponse{
+		Allowed: true,
+	}
+}
+
+// validateUpdate specifically validates update operations for SyncSet objects.
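Before the update handler (documented just above, defined next), a quick sketch of the apply-mode check that both handlers call. An empty mode is deliberately tolerated while unknown modes are rejected; I'm assuming the constants' string values are "Upsert" and "Sync", matching the validResourceApplyModes map earlier in this file:

```go
for _, mode := range []hivev1.SyncSetResourceApplyMode{"", "Upsert", "Sync", "Replace"} {
	errs := validateResourceApplyMode(mode, field.NewPath("spec", "resourceApplyMode"))
	fmt.Printf("%q -> %d error(s)\n", mode, len(errs)) // only "Replace" yields a NotSupported error
}
```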
+func (a *SyncSetValidatingAdmissionHook) validateUpdate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateUpdate", + }) + + newObject := &hivev1.SyncSet{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = newObject.Name + + allErrs := field.ErrorList{} + allErrs = append(allErrs, validateResources(newObject.Spec.Resources, field.NewPath("spec", "resources"))...) + allErrs = append(allErrs, validatePatches(newObject.Spec.Patches, field.NewPath("spec", "patches"))...) + allErrs = append(allErrs, validateSecrets(newObject.Spec.Secrets, field.NewPath("spec", "secretMappings"))...) + allErrs = append(allErrs, validateSourceSecretInSyncSetNamespace(newObject.Spec.Secrets, newObject.Namespace, field.NewPath("spec", "secretMappings"))...) + allErrs = append(allErrs, validateResourceApplyMode(newObject.Spec.ResourceApplyMode, field.NewPath("spec", "resourceApplyMode"))...) + + if len(allErrs) > 0 { + statusError := errors.NewInvalid(newObject.GroupVersionKind().GroupKind(), newObject.Name, allErrs).Status() + contextLogger.Infof(statusError.Message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &statusError, + } + } + + // If we get here, then all checks passed, so the object is valid. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +func validatePatches(patches []hivev1.SyncObjectPatch, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for i, patch := range patches { + if !validPatchTypes[patch.PatchType] { + allErrs = append(allErrs, field.NotSupported(fldPath.Index(i).Child("PatchType"), patch.PatchType, validPatchTypeSlice)) + } + } + return allErrs +} + +func validateResourceApplyMode(resourceApplyMode hivev1.SyncSetResourceApplyMode, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if resourceApplyMode != "" && !validResourceApplyModes[resourceApplyMode] { + allErrs = append(allErrs, field.NotSupported(fldPath, resourceApplyMode, validResourceApplyModeSlice)) + } + return allErrs +} + +func validateResources(resources []runtime.RawExtension, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, resource := range resources { + allErrs = append(allErrs, validateResource(resource, fldPath.Index(i))...) 
+	}
+	return allErrs
+}
+
+func validateResource(resource runtime.RawExtension, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	u := &unstructured.Unstructured{}
+	err := json.Unmarshal(resource.Raw, u)
+	if err != nil {
+		allErrs = append(allErrs, field.Invalid(fldPath, resource.Raw, "Unable to unmarshal resource"))
+		return allErrs
+	}
+
+	if invalidResourceGroupKinds[u.GroupVersionKind().Group][u.GetKind()] {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("APIVersion"), u.GetAPIVersion(), "must use kubernetes group for this resource kind"))
+	}
+
+	return allErrs
+}
+
+func validateSecrets(secrets []hivev1.SecretMapping, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for i, secret := range secrets {
+		allErrs = append(allErrs, validateSecretRef(secret.SourceRef, fldPath.Index(i).Child("sourceRef"))...)
+		allErrs = append(allErrs, validateSecretRef(secret.TargetRef, fldPath.Index(i).Child("targetRef"))...)
+	}
+	return allErrs
+}
+
+func validateSourceSecretInSyncSetNamespace(secrets []hivev1.SecretMapping, syncSetNS string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for i, secret := range secrets {
+		if secret.SourceRef.Namespace != syncSetNS && secret.SourceRef.Namespace != "" {
+			path := fldPath.Index(i).Child("sourceRef")
+
+			allErrs = append(allErrs, field.Invalid(path.Child("namespace"), secret.SourceRef.Namespace,
+				"source secret reference must be in same namespace as SyncSet"))
+		}
+	}
+	return allErrs
+}
+
+func validateSecretRef(ref hivev1.SecretReference, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(ref.Name) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("name"), "Name is required"))
+	}
+	return allErrs
+}
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/vsphere/doc.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/vsphere/doc.go
new file mode 100644
index 00000000000..d37553ddc87
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/vsphere/doc.go
@@ -0,0 +1,3 @@
+// Package vsphere contains API Schema definitions for vSphere clusters.
+// +k8s:deepcopy-gen=package,register
+package vsphere
diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/vsphere/machinepools.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/vsphere/machinepools.go
new file mode 100644
index 00000000000..95ca854eb20
--- /dev/null
+++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/vsphere/machinepools.go
@@ -0,0 +1,24 @@
+package vsphere
+
+// MachinePool stores the configuration for a machine pool installed
+// on vSphere.
+type MachinePool struct {
+	// NumCPUs is the total number of virtual processor cores to assign to a vm.
+	NumCPUs int32 `json:"cpus"`
+
+	// NumCoresPerSocket is the number of cores per socket in a vm. The number
+	// of sockets on the vm will be NumCPUs/NumCoresPerSocket.
+	NumCoresPerSocket int32 `json:"coresPerSocket"`
+
+	// MemoryMiB is the size of a VM's memory in MiB.
+	MemoryMiB int64 `json:"memoryMB"`
+
+	// OSDisk defines the storage for the instance.
+	OSDisk `json:"osDisk"`
+}
+
+// OSDisk defines the disk for a virtual machine.
+type OSDisk struct {
+	// DiskSizeGB defines the size of the disk in GB.
+ DiskSizeGB int32 `json:"diskSizeGB"` +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/vsphere/platform.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/vsphere/platform.go new file mode 100644 index 00000000000..62f1f84372d --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/vsphere/platform.go @@ -0,0 +1,35 @@ +package vsphere + +import ( + corev1 "k8s.io/api/core/v1" +) + +// Platform stores any global configuration used for vSphere platforms. +type Platform struct { + // VCenter is the domain name or IP address of the vCenter. + VCenter string `json:"vCenter"` + + // CredentialsSecretRef refers to a secret that contains the vSphere account access + // credentials: GOVC_USERNAME, GOVC_PASSWORD fields. + CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"` + + // CertificatesSecretRef refers to a secret that contains the vSphere CA certificates + // necessary for communicating with the VCenter. + CertificatesSecretRef corev1.LocalObjectReference `json:"certificatesSecretRef"` + + // Datacenter is the name of the datacenter to use in the vCenter. + Datacenter string `json:"datacenter"` + + // DefaultDatastore is the default datastore to use for provisioning volumes. + DefaultDatastore string `json:"defaultDatastore"` + + // Folder is the name of the folder that will be used and/or created for + // virtual machines. + Folder string `json:"folder,omitempty"` + + // Cluster is the name of the cluster virtual machines will be cloned into. + Cluster string `json:"cluster,omitempty"` + + // Network specifies the name of the network to be used by the cluster. + Network string `json:"network,omitempty"` +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/vsphere/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/vsphere/zz_generated.deepcopy.go new file mode 100644 index 00000000000..b23274b7b77 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/vsphere/zz_generated.deepcopy.go @@ -0,0 +1,56 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package vsphere + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + out.OSDisk = in.OSDisk + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSDisk) DeepCopyInto(out *OSDisk) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk. +func (in *OSDisk) DeepCopy() *OSDisk { + if in == nil { + return nil + } + out := new(OSDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef + out.CertificatesSecretRef = in.CertificatesSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. 
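Note how the vSphere types above keep installer-compatible JSON tags even where they diverge from the Go field names (NumCPUs serializes as cpus, MemoryMiB as memoryMB). A small, self-contained sketch of what a populated pool marshals to, using only the types defined in this diff:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/openshift/hive/pkg/apis/hive/v1/vsphere"
)

func main() {
	mp := vsphere.MachinePool{
		NumCPUs:           4,
		NumCoresPerSocket: 2,
		MemoryMiB:         16384,
		OSDisk:            vsphere.OSDisk{DiskSizeGB: 120},
	}
	b, _ := json.Marshal(mp)
	fmt.Println(string(b))
	// {"cpus":4,"coresPerSocket":2,"memoryMB":16384,"osDisk":{"diskSizeGB":120}}
}
```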
+func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hive/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..f6d5fea363c --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hive/v1/zz_generated.deepcopy.go @@ -0,0 +1,3366 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + agent "github.com/openshift/hive/pkg/apis/hive/v1/agent" + aws "github.com/openshift/hive/pkg/apis/hive/v1/aws" + azure "github.com/openshift/hive/pkg/apis/hive/v1/azure" + baremetal "github.com/openshift/hive/pkg/apis/hive/v1/baremetal" + gcp "github.com/openshift/hive/pkg/apis/hive/v1/gcp" + openstack "github.com/openshift/hive/pkg/apis/hive/v1/openstack" + ovirt "github.com/openshift/hive/pkg/apis/hive/v1/ovirt" + vsphere "github.com/openshift/hive/pkg/apis/hive/v1/vsphere" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterDeprovision) DeepCopyInto(out *AWSClusterDeprovision) { + *out = *in + if in.CredentialsSecretRef != nil { + in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterDeprovision. +func (in *AWSClusterDeprovision) DeepCopy() *AWSClusterDeprovision { + if in == nil { + return nil + } + out := new(AWSClusterDeprovision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSDNSZoneSpec) DeepCopyInto(out *AWSDNSZoneSpec) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make([]AWSResourceTag, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSDNSZoneSpec. +func (in *AWSDNSZoneSpec) DeepCopy() *AWSDNSZoneSpec { + if in == nil { + return nil + } + out := new(AWSDNSZoneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSDNSZoneStatus) DeepCopyInto(out *AWSDNSZoneStatus) { + *out = *in + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSDNSZoneStatus. +func (in *AWSDNSZoneStatus) DeepCopy() *AWSDNSZoneStatus { + if in == nil { + return nil + } + out := new(AWSDNSZoneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSResourceTag) DeepCopyInto(out *AWSResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceTag. 
+func (in *AWSResourceTag) DeepCopy() *AWSResourceTag { + if in == nil { + return nil + } + out := new(AWSResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureClusterDeprovision) DeepCopyInto(out *AzureClusterDeprovision) { + *out = *in + if in.CredentialsSecretRef != nil { + in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureClusterDeprovision. +func (in *AzureClusterDeprovision) DeepCopy() *AzureClusterDeprovision { + if in == nil { + return nil + } + out := new(AzureClusterDeprovision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureDNSZoneSpec) DeepCopyInto(out *AzureDNSZoneSpec) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDNSZoneSpec. +func (in *AzureDNSZoneSpec) DeepCopy() *AzureDNSZoneSpec { + if in == nil { + return nil + } + out := new(AzureDNSZoneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureDNSZoneStatus) DeepCopyInto(out *AzureDNSZoneStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDNSZoneStatus. +func (in *AzureDNSZoneStatus) DeepCopy() *AzureDNSZoneStatus { + if in == nil { + return nil + } + out := new(AzureDNSZoneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupConfig) DeepCopyInto(out *BackupConfig) { + *out = *in + out.Velero = in.Velero + if in.MinBackupPeriodSeconds != nil { + in, out := &in.MinBackupPeriodSeconds, &out.MinBackupPeriodSeconds + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupConfig. +func (in *BackupConfig) DeepCopy() *BackupConfig { + if in == nil { + return nil + } + out := new(BackupConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupReference) DeepCopyInto(out *BackupReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupReference. +func (in *BackupReference) DeepCopy() *BackupReference { + if in == nil { + return nil + } + out := new(BackupReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateBundleSpec) DeepCopyInto(out *CertificateBundleSpec) { + *out = *in + out.CertificateSecretRef = in.CertificateSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateBundleSpec. 
+func (in *CertificateBundleSpec) DeepCopy() *CertificateBundleSpec { + if in == nil { + return nil + } + out := new(CertificateBundleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateBundleStatus) DeepCopyInto(out *CertificateBundleStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateBundleStatus. +func (in *CertificateBundleStatus) DeepCopy() *CertificateBundleStatus { + if in == nil { + return nil + } + out := new(CertificateBundleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Checkpoint) DeepCopyInto(out *Checkpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Checkpoint. +func (in *Checkpoint) DeepCopy() *Checkpoint { + if in == nil { + return nil + } + out := new(Checkpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Checkpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CheckpointList) DeepCopyInto(out *CheckpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Checkpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckpointList. +func (in *CheckpointList) DeepCopy() *CheckpointList { + if in == nil { + return nil + } + out := new(CheckpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CheckpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CheckpointSpec) DeepCopyInto(out *CheckpointSpec) { + *out = *in + in.LastBackupTime.DeepCopyInto(&out.LastBackupTime) + out.LastBackupRef = in.LastBackupRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckpointSpec. +func (in *CheckpointSpec) DeepCopy() *CheckpointSpec { + if in == nil { + return nil + } + out := new(CheckpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CheckpointStatus) DeepCopyInto(out *CheckpointStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckpointStatus. 
+func (in *CheckpointStatus) DeepCopy() *CheckpointStatus { + if in == nil { + return nil + } + out := new(CheckpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterClaim) DeepCopyInto(out *ClusterClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaim. +func (in *ClusterClaim) DeepCopy() *ClusterClaim { + if in == nil { + return nil + } + out := new(ClusterClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterClaim) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterClaimCondition) DeepCopyInto(out *ClusterClaimCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaimCondition. +func (in *ClusterClaimCondition) DeepCopy() *ClusterClaimCondition { + if in == nil { + return nil + } + out := new(ClusterClaimCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterClaimList) DeepCopyInto(out *ClusterClaimList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaimList. +func (in *ClusterClaimList) DeepCopy() *ClusterClaimList { + if in == nil { + return nil + } + out := new(ClusterClaimList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterClaimList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterClaimSpec) DeepCopyInto(out *ClusterClaimSpec) { + *out = *in + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]rbacv1.Subject, len(*in)) + copy(*out, *in) + } + if in.Lifetime != nil { + in, out := &in.Lifetime, &out.Lifetime + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaimSpec. +func (in *ClusterClaimSpec) DeepCopy() *ClusterClaimSpec { + if in == nil { + return nil + } + out := new(ClusterClaimSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterClaimStatus) DeepCopyInto(out *ClusterClaimStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterClaimCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Lifetime != nil { + in, out := &in.Lifetime, &out.Lifetime + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaimStatus. +func (in *ClusterClaimStatus) DeepCopy() *ClusterClaimStatus { + if in == nil { + return nil + } + out := new(ClusterClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeployment) DeepCopyInto(out *ClusterDeployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeployment. +func (in *ClusterDeployment) DeepCopy() *ClusterDeployment { + if in == nil { + return nil + } + out := new(ClusterDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCondition) DeepCopyInto(out *ClusterDeploymentCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCondition. +func (in *ClusterDeploymentCondition) DeepCopy() *ClusterDeploymentCondition { + if in == nil { + return nil + } + out := new(ClusterDeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentList) DeepCopyInto(out *ClusterDeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterDeployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentList. +func (in *ClusterDeploymentList) DeepCopy() *ClusterDeploymentList { + if in == nil { + return nil + } + out := new(ClusterDeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterDeploymentSpec) DeepCopyInto(out *ClusterDeploymentSpec) { + *out = *in + in.Platform.DeepCopyInto(&out.Platform) + if in.PullSecretRef != nil { + in, out := &in.PullSecretRef, &out.PullSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + in.ControlPlaneConfig.DeepCopyInto(&out.ControlPlaneConfig) + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]ClusterIngress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CertificateBundles != nil { + in, out := &in.CertificateBundles, &out.CertificateBundles + *out = make([]CertificateBundleSpec, len(*in)) + copy(*out, *in) + } + if in.ClusterMetadata != nil { + in, out := &in.ClusterMetadata, &out.ClusterMetadata + *out = new(ClusterMetadata) + **out = **in + } + if in.Provisioning != nil { + in, out := &in.Provisioning, &out.Provisioning + *out = new(Provisioning) + (*in).DeepCopyInto(*out) + } + if in.ClusterPoolRef != nil { + in, out := &in.ClusterPoolRef, &out.ClusterPoolRef + *out = new(ClusterPoolReference) + **out = **in + } + if in.HibernateAfter != nil { + in, out := &in.HibernateAfter, &out.HibernateAfter + *out = new(metav1.Duration) + **out = **in + } + if in.InstallAttemptsLimit != nil { + in, out := &in.InstallAttemptsLimit, &out.InstallAttemptsLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentSpec. +func (in *ClusterDeploymentSpec) DeepCopy() *ClusterDeploymentSpec { + if in == nil { + return nil + } + out := new(ClusterDeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentStatus) DeepCopyInto(out *ClusterDeploymentStatus) { + *out = *in + if in.InstallerImage != nil { + in, out := &in.InstallerImage, &out.InstallerImage + *out = new(string) + **out = **in + } + if in.CLIImage != nil { + in, out := &in.CLIImage, &out.CLIImage + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterDeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CertificateBundles != nil { + in, out := &in.CertificateBundles, &out.CertificateBundles + *out = make([]CertificateBundleStatus, len(*in)) + copy(*out, *in) + } + if in.InstallStartedTimestamp != nil { + in, out := &in.InstallStartedTimestamp, &out.InstallStartedTimestamp + *out = (*in).DeepCopy() + } + if in.InstalledTimestamp != nil { + in, out := &in.InstalledTimestamp, &out.InstalledTimestamp + *out = (*in).DeepCopy() + } + if in.ProvisionRef != nil { + in, out := &in.ProvisionRef, &out.ProvisionRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.InstallStrategy != nil { + in, out := &in.InstallStrategy, &out.InstallStrategy + *out = new(InstallStrategyStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentStatus. +func (in *ClusterDeploymentStatus) DeepCopy() *ClusterDeploymentStatus { + if in == nil { + return nil + } + out := new(ClusterDeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterDeprovision) DeepCopyInto(out *ClusterDeprovision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeprovision. +func (in *ClusterDeprovision) DeepCopy() *ClusterDeprovision { + if in == nil { + return nil + } + out := new(ClusterDeprovision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeprovision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeprovisionCondition) DeepCopyInto(out *ClusterDeprovisionCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeprovisionCondition. +func (in *ClusterDeprovisionCondition) DeepCopy() *ClusterDeprovisionCondition { + if in == nil { + return nil + } + out := new(ClusterDeprovisionCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeprovisionList) DeepCopyInto(out *ClusterDeprovisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterDeprovision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeprovisionList. +func (in *ClusterDeprovisionList) DeepCopy() *ClusterDeprovisionList { + if in == nil { + return nil + } + out := new(ClusterDeprovisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeprovisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeprovisionPlatform) DeepCopyInto(out *ClusterDeprovisionPlatform) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSClusterDeprovision) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzureClusterDeprovision) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPClusterDeprovision) + (*in).DeepCopyInto(*out) + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackClusterDeprovision) + (*in).DeepCopyInto(*out) + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(VSphereClusterDeprovision) + **out = **in + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(OvirtClusterDeprovision) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeprovisionPlatform. 
+func (in *ClusterDeprovisionPlatform) DeepCopy() *ClusterDeprovisionPlatform { + if in == nil { + return nil + } + out := new(ClusterDeprovisionPlatform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeprovisionSpec) DeepCopyInto(out *ClusterDeprovisionSpec) { + *out = *in + in.Platform.DeepCopyInto(&out.Platform) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeprovisionSpec. +func (in *ClusterDeprovisionSpec) DeepCopy() *ClusterDeprovisionSpec { + if in == nil { + return nil + } + out := new(ClusterDeprovisionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeprovisionStatus) DeepCopyInto(out *ClusterDeprovisionStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterDeprovisionCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeprovisionStatus. +func (in *ClusterDeprovisionStatus) DeepCopy() *ClusterDeprovisionStatus { + if in == nil { + return nil + } + out := new(ClusterDeprovisionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImageSet) DeepCopyInto(out *ClusterImageSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImageSet. +func (in *ClusterImageSet) DeepCopy() *ClusterImageSet { + if in == nil { + return nil + } + out := new(ClusterImageSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImageSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImageSetList) DeepCopyInto(out *ClusterImageSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterImageSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImageSetList. +func (in *ClusterImageSetList) DeepCopy() *ClusterImageSetList { + if in == nil { + return nil + } + out := new(ClusterImageSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImageSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterImageSetReference) DeepCopyInto(out *ClusterImageSetReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImageSetReference. +func (in *ClusterImageSetReference) DeepCopy() *ClusterImageSetReference { + if in == nil { + return nil + } + out := new(ClusterImageSetReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImageSetSpec) DeepCopyInto(out *ClusterImageSetSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImageSetSpec. +func (in *ClusterImageSetSpec) DeepCopy() *ClusterImageSetSpec { + if in == nil { + return nil + } + out := new(ClusterImageSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImageSetStatus) DeepCopyInto(out *ClusterImageSetStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImageSetStatus. +func (in *ClusterImageSetStatus) DeepCopy() *ClusterImageSetStatus { + if in == nil { + return nil + } + out := new(ClusterImageSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterIngress) DeepCopyInto(out *ClusterIngress) { + *out = *in + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.RouteSelector != nil { + in, out := &in.RouteSelector, &out.RouteSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterIngress. +func (in *ClusterIngress) DeepCopy() *ClusterIngress { + if in == nil { + return nil + } + out := new(ClusterIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMetadata) DeepCopyInto(out *ClusterMetadata) { + *out = *in + out.AdminKubeconfigSecretRef = in.AdminKubeconfigSecretRef + out.AdminPasswordSecretRef = in.AdminPasswordSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMetadata. +func (in *ClusterMetadata) DeepCopy() *ClusterMetadata { + if in == nil { + return nil + } + out := new(ClusterMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorState) DeepCopyInto(out *ClusterOperatorState) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]configv1.ClusterOperatorStatusCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorState. 
+func (in *ClusterOperatorState) DeepCopy() *ClusterOperatorState { + if in == nil { + return nil + } + out := new(ClusterOperatorState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPool) DeepCopyInto(out *ClusterPool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPool. +func (in *ClusterPool) DeepCopy() *ClusterPool { + if in == nil { + return nil + } + out := new(ClusterPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterPool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPoolClaimLifetime) DeepCopyInto(out *ClusterPoolClaimLifetime) { + *out = *in + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(metav1.Duration) + **out = **in + } + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPoolClaimLifetime. +func (in *ClusterPoolClaimLifetime) DeepCopy() *ClusterPoolClaimLifetime { + if in == nil { + return nil + } + out := new(ClusterPoolClaimLifetime) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPoolCondition) DeepCopyInto(out *ClusterPoolCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPoolCondition. +func (in *ClusterPoolCondition) DeepCopy() *ClusterPoolCondition { + if in == nil { + return nil + } + out := new(ClusterPoolCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPoolList) DeepCopyInto(out *ClusterPoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterPool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPoolList. +func (in *ClusterPoolList) DeepCopy() *ClusterPoolList { + if in == nil { + return nil + } + out := new(ClusterPoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterPoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
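Aside: ClusterPoolClaimLifetime above shows the pointer-field pattern — the generator allocates a fresh metav1.Duration and copies the value, because assigning the pointer would leave both structs sharing one allocation. A small sketch of the difference, assuming only the fields shown in this patch:

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
)

// aliasingDemo returns (2h, 1h): the shallow copy shares the original's
// Duration pointer, while DeepCopy gets its own allocation.
func aliasingDemo() (sharedVal, independentVal time.Duration) {
	orig := hivev1.ClusterPoolClaimLifetime{
		Default: &metav1.Duration{Duration: time.Hour},
	}
	shallow := orig         // struct assignment copies the pointer only
	deep := orig.DeepCopy() // allocates a new metav1.Duration

	orig.Default.Duration = 2 * time.Hour
	return shallow.Default.Duration, deep.Default.Duration
}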
+func (in *ClusterPoolReference) DeepCopyInto(out *ClusterPoolReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPoolReference. +func (in *ClusterPoolReference) DeepCopy() *ClusterPoolReference { + if in == nil { + return nil + } + out := new(ClusterPoolReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPoolSpec) DeepCopyInto(out *ClusterPoolSpec) { + *out = *in + in.Platform.DeepCopyInto(&out.Platform) + if in.PullSecretRef != nil { + in, out := &in.PullSecretRef, &out.PullSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(int32) + **out = **in + } + if in.MaxConcurrent != nil { + in, out := &in.MaxConcurrent, &out.MaxConcurrent + *out = new(int32) + **out = **in + } + out.ImageSetRef = in.ImageSetRef + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.InstallConfigSecretTemplateRef != nil { + in, out := &in.InstallConfigSecretTemplateRef, &out.InstallConfigSecretTemplateRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.HibernateAfter != nil { + in, out := &in.HibernateAfter, &out.HibernateAfter + *out = new(metav1.Duration) + **out = **in + } + if in.ClaimLifetime != nil { + in, out := &in.ClaimLifetime, &out.ClaimLifetime + *out = new(ClusterPoolClaimLifetime) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPoolSpec. +func (in *ClusterPoolSpec) DeepCopy() *ClusterPoolSpec { + if in == nil { + return nil + } + out := new(ClusterPoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPoolStatus) DeepCopyInto(out *ClusterPoolStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterPoolCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPoolStatus. +func (in *ClusterPoolStatus) DeepCopy() *ClusterPoolStatus { + if in == nil { + return nil + } + out := new(ClusterPoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterProvision) DeepCopyInto(out *ClusterProvision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterProvision. +func (in *ClusterProvision) DeepCopy() *ClusterProvision { + if in == nil { + return nil + } + out := new(ClusterProvision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterProvision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterProvisionCondition) DeepCopyInto(out *ClusterProvisionCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterProvisionCondition. +func (in *ClusterProvisionCondition) DeepCopy() *ClusterProvisionCondition { + if in == nil { + return nil + } + out := new(ClusterProvisionCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterProvisionList) DeepCopyInto(out *ClusterProvisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterProvision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterProvisionList. +func (in *ClusterProvisionList) DeepCopy() *ClusterProvisionList { + if in == nil { + return nil + } + out := new(ClusterProvisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterProvisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterProvisionSpec) DeepCopyInto(out *ClusterProvisionSpec) { + *out = *in + out.ClusterDeploymentRef = in.ClusterDeploymentRef + in.PodSpec.DeepCopyInto(&out.PodSpec) + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.InfraID != nil { + in, out := &in.InfraID, &out.InfraID + *out = new(string) + **out = **in + } + if in.InstallLog != nil { + in, out := &in.InstallLog, &out.InstallLog + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.AdminKubeconfigSecretRef != nil { + in, out := &in.AdminKubeconfigSecretRef, &out.AdminKubeconfigSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.AdminPasswordSecretRef != nil { + in, out := &in.AdminPasswordSecretRef, &out.AdminPasswordSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.PrevClusterID != nil { + in, out := &in.PrevClusterID, &out.PrevClusterID + *out = new(string) + **out = **in + } + if in.PrevInfraID != nil { + in, out := &in.PrevInfraID, &out.PrevInfraID + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterProvisionSpec. +func (in *ClusterProvisionSpec) DeepCopy() *ClusterProvisionSpec { + if in == nil { + return nil + } + out := new(ClusterProvisionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
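Aside: ClusterProvisionSpec.Metadata above is a *runtime.RawExtension, and the generated code delegates to RawExtension's own generated DeepCopyInto, which duplicates the Raw byte slice rather than sharing it. A quick sketch of that guarantee:

package example

import "k8s.io/apimachinery/pkg/runtime"

// rawBytesCopied reports true: mutating the original Raw bytes does not
// affect the deep copy.
func rawBytesCopied() bool {
	in := &runtime.RawExtension{Raw: []byte(`{"a":1}`)}
	out := in.DeepCopy()
	in.Raw[0] = 'X'
	return out.Raw[0] == '{'
}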
+func (in *ClusterProvisionStatus) DeepCopyInto(out *ClusterProvisionStatus) { + *out = *in + if in.JobRef != nil { + in, out := &in.JobRef, &out.JobRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterProvisionCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterProvisionStatus. +func (in *ClusterProvisionStatus) DeepCopy() *ClusterProvisionStatus { + if in == nil { + return nil + } + out := new(ClusterProvisionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRelocate) DeepCopyInto(out *ClusterRelocate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRelocate. +func (in *ClusterRelocate) DeepCopy() *ClusterRelocate { + if in == nil { + return nil + } + out := new(ClusterRelocate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRelocate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRelocateList) DeepCopyInto(out *ClusterRelocateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRelocate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRelocateList. +func (in *ClusterRelocateList) DeepCopy() *ClusterRelocateList { + if in == nil { + return nil + } + out := new(ClusterRelocateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRelocateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRelocateSpec) DeepCopyInto(out *ClusterRelocateSpec) { + *out = *in + out.KubeconfigSecretRef = in.KubeconfigSecretRef + in.ClusterDeploymentSelector.DeepCopyInto(&out.ClusterDeploymentSelector) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRelocateSpec. +func (in *ClusterRelocateSpec) DeepCopy() *ClusterRelocateSpec { + if in == nil { + return nil + } + out := new(ClusterRelocateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRelocateStatus) DeepCopyInto(out *ClusterRelocateStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRelocateStatus. 
+func (in *ClusterRelocateStatus) DeepCopy() *ClusterRelocateStatus { + if in == nil { + return nil + } + out := new(ClusterRelocateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterState) DeepCopyInto(out *ClusterState) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterState. +func (in *ClusterState) DeepCopy() *ClusterState { + if in == nil { + return nil + } + out := new(ClusterState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterState) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStateList) DeepCopyInto(out *ClusterStateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterState, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStateList. +func (in *ClusterStateList) DeepCopy() *ClusterStateList { + if in == nil { + return nil + } + out := new(ClusterStateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterStateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStateSpec) DeepCopyInto(out *ClusterStateSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStateSpec. +func (in *ClusterStateSpec) DeepCopy() *ClusterStateSpec { + if in == nil { + return nil + } + out := new(ClusterStateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStateStatus) DeepCopyInto(out *ClusterStateStatus) { + *out = *in + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } + if in.ClusterOperators != nil { + in, out := &in.ClusterOperators, &out.ClusterOperators + *out = make([]ClusterOperatorState, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStateStatus. +func (in *ClusterStateStatus) DeepCopy() *ClusterStateStatus { + if in == nil { + return nil + } + out := new(ClusterStateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ControlPlaneAdditionalCertificate) DeepCopyInto(out *ControlPlaneAdditionalCertificate) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneAdditionalCertificate. +func (in *ControlPlaneAdditionalCertificate) DeepCopy() *ControlPlaneAdditionalCertificate { + if in == nil { + return nil + } + out := new(ControlPlaneAdditionalCertificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneConfigSpec) DeepCopyInto(out *ControlPlaneConfigSpec) { + *out = *in + in.ServingCertificates.DeepCopyInto(&out.ServingCertificates) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneConfigSpec. +func (in *ControlPlaneConfigSpec) DeepCopy() *ControlPlaneConfigSpec { + if in == nil { + return nil + } + out := new(ControlPlaneConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneServingCertificateSpec) DeepCopyInto(out *ControlPlaneServingCertificateSpec) { + *out = *in + if in.Additional != nil { + in, out := &in.Additional, &out.Additional + *out = make([]ControlPlaneAdditionalCertificate, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneServingCertificateSpec. +func (in *ControlPlaneServingCertificateSpec) DeepCopy() *ControlPlaneServingCertificateSpec { + if in == nil { + return nil + } + out := new(ControlPlaneServingCertificateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerConfig) DeepCopyInto(out *ControllerConfig) { + *out = *in + if in.ConcurrentReconciles != nil { + in, out := &in.ConcurrentReconciles, &out.ConcurrentReconciles + *out = new(int32) + **out = **in + } + if in.ClientQPS != nil { + in, out := &in.ClientQPS, &out.ClientQPS + *out = new(int32) + **out = **in + } + if in.ClientBurst != nil { + in, out := &in.ClientBurst, &out.ClientBurst + *out = new(int32) + **out = **in + } + if in.QueueQPS != nil { + in, out := &in.QueueQPS, &out.QueueQPS + *out = new(int32) + **out = **in + } + if in.QueueBurst != nil { + in, out := &in.QueueBurst, &out.QueueBurst + *out = new(int32) + **out = **in + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfig. +func (in *ControllerConfig) DeepCopy() *ControllerConfig { + if in == nil { + return nil + } + out := new(ControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ControllerNames) DeepCopyInto(out *ControllerNames) { + { + in := &in + *out = make(ControllerNames, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerNames. 
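Aside: ControllerNames is a named slice type, so its generated DeepCopyInto uses a value receiver and immediately shadows `in` with a pointer to itself, letting the body reuse the generator's pointer-style template. The same idiom, written out for a hypothetical named slice:

package example

// Names is a stand-in for a named slice type like ControllerNames.
type Names []string

// DeepCopyInto mirrors the generated value-receiver idiom: the inner
// block re-binds `in` to &in so the template's pointer code applies.
func (in Names) DeepCopyInto(out *Names) {
	{
		in := &in
		*out = make(Names, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy returns *out rather than out: for a slice type the named
// value itself, not a pointer to it, is the natural thing to hand back.
func (in Names) DeepCopy() Names {
	if in == nil {
		return nil
	}
	out := new(Names)
	in.DeepCopyInto(out)
	return *out
}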
+func (in ControllerNames) DeepCopy() ControllerNames { + if in == nil { + return nil + } + out := new(ControllerNames) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllersConfig) DeepCopyInto(out *ControllersConfig) { + *out = *in + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(ControllerConfig) + (*in).DeepCopyInto(*out) + } + if in.Controllers != nil { + in, out := &in.Controllers, &out.Controllers + *out = make([]SpecificControllerConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllersConfig. +func (in *ControllersConfig) DeepCopy() *ControllersConfig { + if in == nil { + return nil + } + out := new(ControllersConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZone) DeepCopyInto(out *DNSZone) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZone. +func (in *DNSZone) DeepCopy() *DNSZone { + if in == nil { + return nil + } + out := new(DNSZone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSZone) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZoneCondition) DeepCopyInto(out *DNSZoneCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZoneCondition. +func (in *DNSZoneCondition) DeepCopy() *DNSZoneCondition { + if in == nil { + return nil + } + out := new(DNSZoneCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZoneList) DeepCopyInto(out *DNSZoneList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNSZone, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZoneList. +func (in *DNSZoneList) DeepCopy() *DNSZoneList { + if in == nil { + return nil + } + out := new(DNSZoneList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSZoneList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSZoneSpec) DeepCopyInto(out *DNSZoneSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSDNSZoneSpec) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPDNSZoneSpec) + **out = **in + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzureDNSZoneSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZoneSpec. +func (in *DNSZoneSpec) DeepCopy() *DNSZoneSpec { + if in == nil { + return nil + } + out := new(DNSZoneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZoneStatus) DeepCopyInto(out *DNSZoneStatus) { + *out = *in + if in.LastSyncTimestamp != nil { + in, out := &in.LastSyncTimestamp, &out.LastSyncTimestamp + *out = (*in).DeepCopy() + } + if in.NameServers != nil { + in, out := &in.NameServers, &out.NameServers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSDNSZoneStatus) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPDNSZoneStatus) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzureDNSZoneStatus) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DNSZoneCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZoneStatus. +func (in *DNSZoneStatus) DeepCopy() *DNSZoneStatus { + if in == nil { + return nil + } + out := new(DNSZoneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailedProvisionAWSConfig) DeepCopyInto(out *FailedProvisionAWSConfig) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedProvisionAWSConfig. +func (in *FailedProvisionAWSConfig) DeepCopy() *FailedProvisionAWSConfig { + if in == nil { + return nil + } + out := new(FailedProvisionAWSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailedProvisionConfig) DeepCopyInto(out *FailedProvisionConfig) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(FailedProvisionAWSConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedProvisionConfig. +func (in *FailedProvisionConfig) DeepCopy() *FailedProvisionConfig { + if in == nil { + return nil + } + out := new(FailedProvisionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
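Aside: DNSZoneStatus above mixes all of the generator's strategies in one function — `(*in).DeepCopy()` for *metav1.Time, `copy` for a []string, DeepCopyInto for cloud structs with their own reference fields, plain `**out = **in` for shallow-copyable ones, and an element-wise loop for conditions. The slice case is the easiest to check; this sketch assumes only the NameServers field shown in the patch:

package example

import hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"

// nameServersIndependent reports true: the copy owns its own backing
// array, so mutating the original slice cannot leak into it.
func nameServersIndependent() bool {
	orig := &hivev1.DNSZoneStatus{NameServers: []string{"ns1.example.com"}}
	cp := orig.DeepCopy()
	orig.NameServers[0] = "changed"
	return cp.NameServers[0] == "ns1.example.com"
}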
+func (in *FeatureGateSelection) DeepCopyInto(out *FeatureGateSelection) { + *out = *in + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = new(FeatureGatesEnabled) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSelection. +func (in *FeatureGateSelection) DeepCopy() *FeatureGateSelection { + if in == nil { + return nil + } + out := new(FeatureGateSelection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGatesEnabled) DeepCopyInto(out *FeatureGatesEnabled) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGatesEnabled. +func (in *FeatureGatesEnabled) DeepCopy() *FeatureGatesEnabled { + if in == nil { + return nil + } + out := new(FeatureGatesEnabled) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPClusterDeprovision) DeepCopyInto(out *GCPClusterDeprovision) { + *out = *in + if in.CredentialsSecretRef != nil { + in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPClusterDeprovision. +func (in *GCPClusterDeprovision) DeepCopy() *GCPClusterDeprovision { + if in == nil { + return nil + } + out := new(GCPClusterDeprovision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPDNSZoneSpec) DeepCopyInto(out *GCPDNSZoneSpec) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPDNSZoneSpec. +func (in *GCPDNSZoneSpec) DeepCopy() *GCPDNSZoneSpec { + if in == nil { + return nil + } + out := new(GCPDNSZoneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPDNSZoneStatus) DeepCopyInto(out *GCPDNSZoneStatus) { + *out = *in + if in.ZoneName != nil { + in, out := &in.ZoneName, &out.ZoneName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPDNSZoneStatus. +func (in *GCPDNSZoneStatus) DeepCopy() *GCPDNSZoneStatus { + if in == nil { + return nil + } + out := new(GCPDNSZoneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HiveConfig) DeepCopyInto(out *HiveConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveConfig. 
+func (in *HiveConfig) DeepCopy() *HiveConfig { + if in == nil { + return nil + } + out := new(HiveConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HiveConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HiveConfigList) DeepCopyInto(out *HiveConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HiveConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveConfigList. +func (in *HiveConfigList) DeepCopy() *HiveConfigList { + if in == nil { + return nil + } + out := new(HiveConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HiveConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HiveConfigSpec) DeepCopyInto(out *HiveConfigSpec) { + *out = *in + if in.ManagedDomains != nil { + in, out := &in.ManagedDomains, &out.ManagedDomains + *out = make([]ManageDNSConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalCertificateAuthoritiesSecretRef != nil { + in, out := &in.AdditionalCertificateAuthoritiesSecretRef, &out.AdditionalCertificateAuthoritiesSecretRef + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.GlobalPullSecretRef != nil { + in, out := &in.GlobalPullSecretRef, &out.GlobalPullSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + in.Backup.DeepCopyInto(&out.Backup) + in.FailedProvisionConfig.DeepCopyInto(&out.FailedProvisionConfig) + if in.MaintenanceMode != nil { + in, out := &in.MaintenanceMode, &out.MaintenanceMode + *out = new(bool) + **out = **in + } + if in.DeprovisionsDisabled != nil { + in, out := &in.DeprovisionsDisabled, &out.DeprovisionsDisabled + *out = new(bool) + **out = **in + } + if in.DisabledControllers != nil { + in, out := &in.DisabledControllers, &out.DisabledControllers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ControllersConfig != nil { + in, out := &in.ControllersConfig, &out.ControllersConfig + *out = new(ControllersConfig) + (*in).DeepCopyInto(*out) + } + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = new(FeatureGateSelection) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveConfigSpec. +func (in *HiveConfigSpec) DeepCopy() *HiveConfigSpec { + if in == nil { + return nil + } + out := new(HiveConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HiveConfigStatus) DeepCopyInto(out *HiveConfigStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveConfigStatus. 
+func (in *HiveConfigStatus) DeepCopy() *HiveConfigStatus { + if in == nil { + return nil + } + out := new(HiveConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProviderStatus) DeepCopyInto(out *IdentityProviderStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderStatus. +func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus { + if in == nil { + return nil + } + out := new(IdentityProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallStrategy) DeepCopyInto(out *InstallStrategy) { + *out = *in + if in.Agent != nil { + in, out := &in.Agent, &out.Agent + *out = new(agent.InstallStrategy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallStrategy. +func (in *InstallStrategy) DeepCopy() *InstallStrategy { + if in == nil { + return nil + } + out := new(InstallStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallStrategyStatus) DeepCopyInto(out *InstallStrategyStatus) { + *out = *in + if in.Agent != nil { + in, out := &in.Agent, &out.Agent + *out = new(agent.InstallStrategyStatus) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallStrategyStatus. +func (in *InstallStrategyStatus) DeepCopy() *InstallStrategyStatus { + if in == nil { + return nil + } + out := new(InstallStrategyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeconfigSecretReference) DeepCopyInto(out *KubeconfigSecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigSecretReference. +func (in *KubeconfigSecretReference) DeepCopy() *KubeconfigSecretReference { + if in == nil { + return nil + } + out := new(KubeconfigSecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachinePool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachinePoolAutoscaling) DeepCopyInto(out *MachinePoolAutoscaling) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolAutoscaling. +func (in *MachinePoolAutoscaling) DeepCopy() *MachinePoolAutoscaling { + if in == nil { + return nil + } + out := new(MachinePoolAutoscaling) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePoolCondition) DeepCopyInto(out *MachinePoolCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolCondition. +func (in *MachinePoolCondition) DeepCopy() *MachinePoolCondition { + if in == nil { + return nil + } + out := new(MachinePoolCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePoolList) DeepCopyInto(out *MachinePoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachinePool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolList. +func (in *MachinePoolList) DeepCopy() *MachinePoolList { + if in == nil { + return nil + } + out := new(MachinePoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachinePoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePoolNameLease) DeepCopyInto(out *MachinePoolNameLease) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolNameLease. +func (in *MachinePoolNameLease) DeepCopy() *MachinePoolNameLease { + if in == nil { + return nil + } + out := new(MachinePoolNameLease) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachinePoolNameLease) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePoolNameLeaseList) DeepCopyInto(out *MachinePoolNameLeaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachinePoolNameLease, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolNameLeaseList. 
+func (in *MachinePoolNameLeaseList) DeepCopy() *MachinePoolNameLeaseList { + if in == nil { + return nil + } + out := new(MachinePoolNameLeaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachinePoolNameLeaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePoolNameLeaseSpec) DeepCopyInto(out *MachinePoolNameLeaseSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolNameLeaseSpec. +func (in *MachinePoolNameLeaseSpec) DeepCopy() *MachinePoolNameLeaseSpec { + if in == nil { + return nil + } + out := new(MachinePoolNameLeaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePoolNameLeaseStatus) DeepCopyInto(out *MachinePoolNameLeaseStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolNameLeaseStatus. +func (in *MachinePoolNameLeaseStatus) DeepCopy() *MachinePoolNameLeaseStatus { + if in == nil { + return nil + } + out := new(MachinePoolNameLeaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePoolPlatform) DeepCopyInto(out *MachinePoolPlatform) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(aws.MachinePoolPlatform) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(azure.MachinePool) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(gcp.MachinePool) + (*in).DeepCopyInto(*out) + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(openstack.MachinePool) + (*in).DeepCopyInto(*out) + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(vsphere.MachinePool) + **out = **in + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(ovirt.MachinePool) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolPlatform. +func (in *MachinePoolPlatform) DeepCopy() *MachinePoolPlatform { + if in == nil { + return nil + } + out := new(MachinePoolPlatform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachinePoolSpec) DeepCopyInto(out *MachinePoolSpec) { + *out = *in + out.ClusterDeploymentRef = in.ClusterDeploymentRef + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int64) + **out = **in + } + if in.Autoscaling != nil { + in, out := &in.Autoscaling, &out.Autoscaling + *out = new(MachinePoolAutoscaling) + **out = **in + } + in.Platform.DeepCopyInto(&out.Platform) + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]corev1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolSpec. +func (in *MachinePoolSpec) DeepCopy() *MachinePoolSpec { + if in == nil { + return nil + } + out := new(MachinePoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePoolStatus) DeepCopyInto(out *MachinePoolStatus) { + *out = *in + if in.MachineSets != nil { + in, out := &in.MachineSets, &out.MachineSets + *out = make([]MachineSetStatus, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]MachinePoolCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolStatus. +func (in *MachinePoolStatus) DeepCopy() *MachinePoolStatus { + if in == nil { + return nil + } + out := new(MachinePoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus. +func (in *MachineSetStatus) DeepCopy() *MachineSetStatus { + if in == nil { + return nil + } + out := new(MachineSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManageDNSAWSConfig) DeepCopyInto(out *ManageDNSAWSConfig) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageDNSAWSConfig. +func (in *ManageDNSAWSConfig) DeepCopy() *ManageDNSAWSConfig { + if in == nil { + return nil + } + out := new(ManageDNSAWSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManageDNSAzureConfig) DeepCopyInto(out *ManageDNSAzureConfig) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageDNSAzureConfig. +func (in *ManageDNSAzureConfig) DeepCopy() *ManageDNSAzureConfig { + if in == nil { + return nil + } + out := new(ManageDNSAzureConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
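Aside: MachinePoolSpec.Labels above shows the map pattern — maps are reference types in Go, so the generator allocates a new map and copies entry by entry instead of assigning the map header. A minimal hand-written equivalent:

package example

// copyLabels duplicates a string map the same way the generated code
// does; assigning `out = in` would share the underlying map.
func copyLabels(in map[string]string) map[string]string {
	if in == nil {
		return nil
	}
	out := make(map[string]string, len(in))
	for k, v := range in {
		out[k] = v
	}
	return out
}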
+func (in *ManageDNSConfig) DeepCopyInto(out *ManageDNSConfig) { + *out = *in + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(ManageDNSAWSConfig) + **out = **in + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(ManageDNSGCPConfig) + **out = **in + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(ManageDNSAzureConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageDNSConfig. +func (in *ManageDNSConfig) DeepCopy() *ManageDNSConfig { + if in == nil { + return nil + } + out := new(ManageDNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManageDNSGCPConfig) DeepCopyInto(out *ManageDNSGCPConfig) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageDNSGCPConfig. +func (in *ManageDNSGCPConfig) DeepCopy() *ManageDNSGCPConfig { + if in == nil { + return nil + } + out := new(ManageDNSGCPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackClusterDeprovision) DeepCopyInto(out *OpenStackClusterDeprovision) { + *out = *in + if in.CredentialsSecretRef != nil { + in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.CertificatesSecretRef != nil { + in, out := &in.CertificatesSecretRef, &out.CertificatesSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackClusterDeprovision. +func (in *OpenStackClusterDeprovision) DeepCopy() *OpenStackClusterDeprovision { + if in == nil { + return nil + } + out := new(OpenStackClusterDeprovision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtClusterDeprovision) DeepCopyInto(out *OvirtClusterDeprovision) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef + out.CertificatesSecretRef = in.CertificatesSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtClusterDeprovision. +func (in *OvirtClusterDeprovision) DeepCopy() *OvirtClusterDeprovision { + if in == nil { + return nil + } + out := new(OvirtClusterDeprovision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
in must be non-nil.
+func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(aws.Platform) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(azure.Platform) + **out = **in + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(baremetal.Platform) + **out = **in + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(gcp.Platform) + **out = **in + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(openstack.Platform) + (*in).DeepCopyInto(*out) + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(vsphere.Platform) + **out = **in + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(ovirt.Platform) + **out = **in + } + if in.AgentBareMetal != nil { + in, out := &in.AgentBareMetal, &out.AgentBareMetal + *out = new(agent.BareMetalPlatform) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Provisioning) DeepCopyInto(out *Provisioning) { + *out = *in + if in.InstallConfigSecretRef != nil { + in, out := &in.InstallConfigSecretRef, &out.InstallConfigSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.ImageSetRef != nil { + in, out := &in.ImageSetRef, &out.ImageSetRef + *out = new(ClusterImageSetReference) + **out = **in + } + if in.ManifestsConfigMapRef != nil { + in, out := &in.ManifestsConfigMapRef, &out.ManifestsConfigMapRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.SSHPrivateKeySecretRef != nil { + in, out := &in.SSHPrivateKeySecretRef, &out.SSHPrivateKeySecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.SSHKnownHosts != nil { + in, out := &in.SSHKnownHosts, &out.SSHKnownHosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.InstallerEnv != nil { + in, out := &in.InstallerEnv, &out.InstallerEnv + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstallStrategy != nil { + in, out := &in.InstallStrategy, &out.InstallStrategy + *out = new(InstallStrategy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Provisioning. +func (in *Provisioning) DeepCopy() *Provisioning { + if in == nil { + return nil + } + out := new(Provisioning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretMapping) DeepCopyInto(out *SecretMapping) { + *out = *in + out.SourceRef = in.SourceRef + out.TargetRef = in.TargetRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretMapping. +func (in *SecretMapping) DeepCopy() *SecretMapping { + if in == nil { + return nil + } + out := new(SecretMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
in must be non-nil.
+func (in *SecretReference) DeepCopyInto(out *SecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. +func (in *SecretReference) DeepCopy() *SecretReference { + if in == nil { + return nil + } + out := new(SecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelectorSyncIdentityProvider) DeepCopyInto(out *SelectorSyncIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectorSyncIdentityProvider. +func (in *SelectorSyncIdentityProvider) DeepCopy() *SelectorSyncIdentityProvider { + if in == nil { + return nil + } + out := new(SelectorSyncIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelectorSyncIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelectorSyncIdentityProviderList) DeepCopyInto(out *SelectorSyncIdentityProviderList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SelectorSyncIdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectorSyncIdentityProviderList. +func (in *SelectorSyncIdentityProviderList) DeepCopy() *SelectorSyncIdentityProviderList { + if in == nil { + return nil + } + out := new(SelectorSyncIdentityProviderList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelectorSyncIdentityProviderList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelectorSyncIdentityProviderSpec) DeepCopyInto(out *SelectorSyncIdentityProviderSpec) { + *out = *in + in.SyncIdentityProviderCommonSpec.DeepCopyInto(&out.SyncIdentityProviderCommonSpec) + in.ClusterDeploymentSelector.DeepCopyInto(&out.ClusterDeploymentSelector) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectorSyncIdentityProviderSpec. +func (in *SelectorSyncIdentityProviderSpec) DeepCopy() *SelectorSyncIdentityProviderSpec { + if in == nil { + return nil + } + out := new(SelectorSyncIdentityProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
in must be non-nil.
+func (in *SelectorSyncSet) DeepCopyInto(out *SelectorSyncSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectorSyncSet. +func (in *SelectorSyncSet) DeepCopy() *SelectorSyncSet { + if in == nil { + return nil + } + out := new(SelectorSyncSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelectorSyncSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelectorSyncSetList) DeepCopyInto(out *SelectorSyncSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SelectorSyncSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectorSyncSetList. +func (in *SelectorSyncSetList) DeepCopy() *SelectorSyncSetList { + if in == nil { + return nil + } + out := new(SelectorSyncSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelectorSyncSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelectorSyncSetSpec) DeepCopyInto(out *SelectorSyncSetSpec) { + *out = *in + in.SyncSetCommonSpec.DeepCopyInto(&out.SyncSetCommonSpec) + in.ClusterDeploymentSelector.DeepCopyInto(&out.ClusterDeploymentSelector) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectorSyncSetSpec. +func (in *SelectorSyncSetSpec) DeepCopy() *SelectorSyncSetSpec { + if in == nil { + return nil + } + out := new(SelectorSyncSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelectorSyncSetStatus) DeepCopyInto(out *SelectorSyncSetStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectorSyncSetStatus. +func (in *SelectorSyncSetStatus) DeepCopy() *SelectorSyncSetStatus { + if in == nil { + return nil + } + out := new(SelectorSyncSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecificControllerConfig) DeepCopyInto(out *SpecificControllerConfig) { + *out = *in + in.Config.DeepCopyInto(&out.Config) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecificControllerConfig. +func (in *SpecificControllerConfig) DeepCopy() *SpecificControllerConfig { + if in == nil { + return nil + } + out := new(SpecificControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SyncCondition) DeepCopyInto(out *SyncCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncCondition. +func (in *SyncCondition) DeepCopy() *SyncCondition { + if in == nil { + return nil + } + out := new(SyncCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncIdentityProvider) DeepCopyInto(out *SyncIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncIdentityProvider. +func (in *SyncIdentityProvider) DeepCopy() *SyncIdentityProvider { + if in == nil { + return nil + } + out := new(SyncIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SyncIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncIdentityProviderCommonSpec) DeepCopyInto(out *SyncIdentityProviderCommonSpec) { + *out = *in + if in.IdentityProviders != nil { + in, out := &in.IdentityProviders, &out.IdentityProviders + *out = make([]configv1.IdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncIdentityProviderCommonSpec. +func (in *SyncIdentityProviderCommonSpec) DeepCopy() *SyncIdentityProviderCommonSpec { + if in == nil { + return nil + } + out := new(SyncIdentityProviderCommonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncIdentityProviderList) DeepCopyInto(out *SyncIdentityProviderList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SyncIdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncIdentityProviderList. +func (in *SyncIdentityProviderList) DeepCopy() *SyncIdentityProviderList { + if in == nil { + return nil + } + out := new(SyncIdentityProviderList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SyncIdentityProviderList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SyncIdentityProviderSpec) DeepCopyInto(out *SyncIdentityProviderSpec) { + *out = *in + in.SyncIdentityProviderCommonSpec.DeepCopyInto(&out.SyncIdentityProviderCommonSpec) + if in.ClusterDeploymentRefs != nil { + in, out := &in.ClusterDeploymentRefs, &out.ClusterDeploymentRefs + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncIdentityProviderSpec. +func (in *SyncIdentityProviderSpec) DeepCopy() *SyncIdentityProviderSpec { + if in == nil { + return nil + } + out := new(SyncIdentityProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncObjectPatch) DeepCopyInto(out *SyncObjectPatch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncObjectPatch. +func (in *SyncObjectPatch) DeepCopy() *SyncObjectPatch { + if in == nil { + return nil + } + out := new(SyncObjectPatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncSet) DeepCopyInto(out *SyncSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncSet. +func (in *SyncSet) DeepCopy() *SyncSet { + if in == nil { + return nil + } + out := new(SyncSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SyncSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncSetCommonSpec) DeepCopyInto(out *SyncSetCommonSpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Patches != nil { + in, out := &in.Patches, &out.Patches + *out = make([]SyncObjectPatch, len(*in)) + copy(*out, *in) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretMapping, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncSetCommonSpec. +func (in *SyncSetCommonSpec) DeepCopy() *SyncSetCommonSpec { + if in == nil { + return nil + } + out := new(SyncSetCommonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncSetList) DeepCopyInto(out *SyncSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SyncSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncSetList. 
+func (in *SyncSetList) DeepCopy() *SyncSetList { + if in == nil { + return nil + } + out := new(SyncSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SyncSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncSetObjectStatus) DeepCopyInto(out *SyncSetObjectStatus) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]SyncStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Patches != nil { + in, out := &in.Patches, &out.Patches + *out = make([]SyncStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SyncStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]SyncCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncSetObjectStatus. +func (in *SyncSetObjectStatus) DeepCopy() *SyncSetObjectStatus { + if in == nil { + return nil + } + out := new(SyncSetObjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncSetSpec) DeepCopyInto(out *SyncSetSpec) { + *out = *in + in.SyncSetCommonSpec.DeepCopyInto(&out.SyncSetCommonSpec) + if in.ClusterDeploymentRefs != nil { + in, out := &in.ClusterDeploymentRefs, &out.ClusterDeploymentRefs + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncSetSpec. +func (in *SyncSetSpec) DeepCopy() *SyncSetSpec { + if in == nil { + return nil + } + out := new(SyncSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncSetStatus) DeepCopyInto(out *SyncSetStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncSetStatus. +func (in *SyncSetStatus) DeepCopy() *SyncSetStatus { + if in == nil { + return nil + } + out := new(SyncSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncStatus) DeepCopyInto(out *SyncStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]SyncCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncStatus. +func (in *SyncStatus) DeepCopy() *SyncStatus { + if in == nil { + return nil + } + out := new(SyncStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VSphereClusterDeprovision) DeepCopyInto(out *VSphereClusterDeprovision) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef + out.CertificatesSecretRef = in.CertificatesSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereClusterDeprovision. +func (in *VSphereClusterDeprovision) DeepCopy() *VSphereClusterDeprovision { + if in == nil { + return nil + } + out := new(VSphereClusterDeprovision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VeleroBackupConfig) DeepCopyInto(out *VeleroBackupConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VeleroBackupConfig. +func (in *VeleroBackupConfig) DeepCopy() *VeleroBackupConfig { + if in == nil { + return nil + } + out := new(VeleroBackupConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/clustersync_types.go b/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/clustersync_types.go new file mode 100644 index 00000000000..b9f5a72f2fd --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/clustersync_types.go @@ -0,0 +1,144 @@ +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterSync is the status of all of the SelectorSyncSets and SyncSets that apply to a ClusterDeployment. +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=clustersyncs,shortName=csync,scope=Namespaced +type ClusterSync struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterSyncSpec `json:"spec,omitempty"` + Status ClusterSyncStatus `json:"status,omitempty"` +} + +// ClusterSyncSpec defines the desired state of ClusterSync +type ClusterSyncSpec struct{} + +// ClusterSyncStatus defines the observed state of ClusterSync +type ClusterSyncStatus struct { + // SyncSets is the sync status of all of the SyncSets for the cluster. + // +optional + SyncSets []SyncStatus `json:"syncSets,omitempty"` + + // SelectorSyncSets is the sync status of all of the SelectorSyncSets for the cluster. + // +optional + SelectorSyncSets []SyncStatus `json:"selectorSyncSets,omitempty"` + + // Conditions is a list of conditions associated with syncing to the cluster. + // +optional + Conditions []ClusterSyncCondition `json:"conditions,omitempty"` + + // FirstSuccessTime is the time we first successfully applied all (selector)syncsets to a cluster. + // +optional + FirstSuccessTime *metav1.Time `json:"firstSuccessTime,omitempty"` +} + +// SyncStatus is the status of applying a specific SyncSet or SelectorSyncSet to the cluster. +type SyncStatus struct { + // Name is the name of the SyncSet or SelectorSyncSet. + Name string `json:"name"` + + // ObservedGeneration is the generation of the SyncSet or SelectorSyncSet that was last observed. + ObservedGeneration int64 `json:"observedGeneration"` + + // ResourcesToDelete is the list of resources in the cluster that should be deleted when the SyncSet or SelectorSyncSet + // is deleted or is no longer matched to the cluster. 
+ // +optional + ResourcesToDelete []SyncResourceReference `json:"resourcesToDelete,omitempty"` + + // Result is the result of the last attempt to apply the SyncSet or SelectorSyncSet to the cluster. + Result SyncSetResult `json:"result"` + + // FailureMessage is a message describing why the SyncSet or SelectorSyncSet could not be applied. This is only + // set when Result is Failure. + // +optional + FailureMessage string `json:"failureMessage,omitempty"` + + // LastTransitionTime is the time when this status last changed. + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + + // FirstSuccessTime is the time when the SyncSet or SelectorSyncSet was first successfully applied to the cluster. + // +optional + FirstSuccessTime *metav1.Time `json:"firstSuccessTime,omitempty"` +} + +// SyncResourceReference is a reference to a resource that is synced to a cluster via a SyncSet or SelectorSyncSet. +type SyncResourceReference struct { + // APIVersion is the Group and Version of the resource. + APIVersion string `json:"apiVersion"` + + // Kind is the Kind of the resource. + // +optional + Kind string `json:"kind"` + + // Name is the name of the resource. + Name string `json:"name"` + + // Namespace is the namespace of the resource. + // +optional + Namespace string `json:"namespace,omitempty"` +} + +// SyncSetResult is the result of a sync attempt. +// +kubebuilder:validation:Enum=Success;Failure +type SyncSetResult string + +const ( + // SuccessSyncSetResult is the result when the SyncSet or SelectorSyncSet was applied successfully to the cluster. + SuccessSyncSetResult SyncSetResult = "Success" + + // FailureSyncSetResult is the result when there was an error when attempting to apply the SyncSet or SelectorSyncSet + // to the cluster + FailureSyncSetResult SyncSetResult = "Failure" +) + +// ClusterSyncCondition contains details for the current condition of a ClusterSync +type ClusterSyncCondition struct { + // Type is the type of the condition. + Type ClusterSyncConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about the last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// ClusterSyncConditionType is a valid value for ClusterSyncCondition.Type +type ClusterSyncConditionType string + +const ( + // ClusterSyncFailed is the type of condition used to indicate whether there are SyncSets or SelectorSyncSets which + // have not been applied due to an error. 
+ ClusterSyncFailed ClusterSyncConditionType = "Failed" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterSyncList contains a list of ClusterSync +type ClusterSyncList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterSync `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterSync{}, &ClusterSyncList{}) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/clustersynclease_types.go b/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/clustersynclease_types.go new file mode 100644 index 00000000000..aa0285c90ac --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/clustersynclease_types.go @@ -0,0 +1,37 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterSyncLease is a record of the last time that SyncSets and SelectorSyncSets were applied to a cluster. +// +k8s:openapi-gen=true +// +kubebuilder:resource:path=clustersyncleases,shortName=csl,scope=Namespaced +type ClusterSyncLease struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterSyncLeaseSpec `json:"spec,omitempty"` +} + +// ClusterSyncLeaseSpec is the specification of a ClusterSyncLease. +type ClusterSyncLeaseSpec struct { + // RenewTime is the time when SyncSets and SelectorSyncSets were last applied to the cluster. + RenewTime metav1.MicroTime `json:"renewTime"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterSyncLeaseList contains a list of ClusterSyncLeases. +type ClusterSyncLeaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterSyncLease `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterSyncLease{}, &ClusterSyncLeaseList{}) +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/doc.go b/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/doc.go new file mode 100644 index 00000000000..70c44a8218e --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/doc.go @@ -0,0 +1,7 @@ +// Package v1alpha1 contains API Schema definitions for the hiveinternal v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/hive/pkg/apis/hiveinternal +// +k8s:defaulter-gen=TypeMeta +// +groupName=hiveinternal.openshift.io +package v1alpha1 diff --git a/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/register.go b/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/register.go new file mode 100644 index 00000000000..f1c232f4223 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/register.go @@ -0,0 +1,36 @@ +// NOTE: Boilerplate only. Ignore this file. 
+ +// Package v1alpha1 contains API Schema definitions for the hiveinternal v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/hive/pkg/apis/hiveinternal +// +k8s:defaulter-gen=TypeMeta +// +groupName=hiveinternal.openshift.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" +) + +var ( + // HiveInternalAPIGroup is the group that all hiveinternal objects belong to in the API server. + HiveInternalAPIGroup = "hiveinternal.openshift.io" + + // HiveInternalAPIVersion is the api version that all hiveinternal objects are currently at. + HiveInternalAPIVersion = "v1alpha1" + + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: HiveInternalAPIGroup, Version: HiveInternalAPIVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme is a shortcut for SchemeBuilder.AddToScheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..e08d44f4ab0 --- /dev/null +++ b/vendor/github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,264 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSync) DeepCopyInto(out *ClusterSync) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSync. +func (in *ClusterSync) DeepCopy() *ClusterSync { + if in == nil { + return nil + } + out := new(ClusterSync) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterSync) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncCondition) DeepCopyInto(out *ClusterSyncCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncCondition. +func (in *ClusterSyncCondition) DeepCopy() *ClusterSyncCondition { + if in == nil { + return nil + } + out := new(ClusterSyncCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterSyncLease) DeepCopyInto(out *ClusterSyncLease) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncLease. +func (in *ClusterSyncLease) DeepCopy() *ClusterSyncLease { + if in == nil { + return nil + } + out := new(ClusterSyncLease) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterSyncLease) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncLeaseList) DeepCopyInto(out *ClusterSyncLeaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterSyncLease, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncLeaseList. +func (in *ClusterSyncLeaseList) DeepCopy() *ClusterSyncLeaseList { + if in == nil { + return nil + } + out := new(ClusterSyncLeaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterSyncLeaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncLeaseSpec) DeepCopyInto(out *ClusterSyncLeaseSpec) { + *out = *in + in.RenewTime.DeepCopyInto(&out.RenewTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncLeaseSpec. +func (in *ClusterSyncLeaseSpec) DeepCopy() *ClusterSyncLeaseSpec { + if in == nil { + return nil + } + out := new(ClusterSyncLeaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncList) DeepCopyInto(out *ClusterSyncList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterSync, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncList. +func (in *ClusterSyncList) DeepCopy() *ClusterSyncList { + if in == nil { + return nil + } + out := new(ClusterSyncList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterSyncList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncSpec) DeepCopyInto(out *ClusterSyncSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncSpec. 
+func (in *ClusterSyncSpec) DeepCopy() *ClusterSyncSpec { + if in == nil { + return nil + } + out := new(ClusterSyncSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncStatus) DeepCopyInto(out *ClusterSyncStatus) { + *out = *in + if in.SyncSets != nil { + in, out := &in.SyncSets, &out.SyncSets + *out = make([]SyncStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SelectorSyncSets != nil { + in, out := &in.SelectorSyncSets, &out.SelectorSyncSets + *out = make([]SyncStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterSyncCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FirstSuccessTime != nil { + in, out := &in.FirstSuccessTime, &out.FirstSuccessTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncStatus. +func (in *ClusterSyncStatus) DeepCopy() *ClusterSyncStatus { + if in == nil { + return nil + } + out := new(ClusterSyncStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncResourceReference) DeepCopyInto(out *SyncResourceReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncResourceReference. +func (in *SyncResourceReference) DeepCopy() *SyncResourceReference { + if in == nil { + return nil + } + out := new(SyncResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncStatus) DeepCopyInto(out *SyncStatus) { + *out = *in + if in.ResourcesToDelete != nil { + in, out := &in.ResourcesToDelete, &out.ResourcesToDelete + *out = make([]SyncResourceReference, len(*in)) + copy(*out, *in) + } + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + if in.FirstSuccessTime != nil { + in, out := &in.FirstSuccessTime, &out.FirstSuccessTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncStatus. 
+func (in *SyncStatus) DeepCopy() *SyncStatus { + if in == nil { + return nil + } + out := new(SyncStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 69b699f1362..5b3b1b6fb82 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -567,6 +567,21 @@ github.com/openshift/generic-admission-server/pkg/apiserver github.com/openshift/generic-admission-server/pkg/cmd github.com/openshift/generic-admission-server/pkg/cmd/server github.com/openshift/generic-admission-server/pkg/registry/admissionreview +# github.com/openshift/hive/pkg/apis v0.0.0 => ./pkg/apis +## explicit +github.com/openshift/hive/pkg/apis +github.com/openshift/hive/pkg/apis/helpers +github.com/openshift/hive/pkg/apis/hive/v1 +github.com/openshift/hive/pkg/apis/hive/v1/agent +github.com/openshift/hive/pkg/apis/hive/v1/aws +github.com/openshift/hive/pkg/apis/hive/v1/azure +github.com/openshift/hive/pkg/apis/hive/v1/baremetal +github.com/openshift/hive/pkg/apis/hive/v1/gcp +github.com/openshift/hive/pkg/apis/hive/v1/openstack +github.com/openshift/hive/pkg/apis/hive/v1/ovirt +github.com/openshift/hive/pkg/apis/hive/v1/validating-webhooks +github.com/openshift/hive/pkg/apis/hive/v1/vsphere +github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1 # github.com/openshift/installer v0.9.0-master.0.20210201172249-df32ad26dd6f ## explicit github.com/openshift/installer/data @@ -1845,3 +1860,4 @@ sigs.k8s.io/yaml # google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 # google.golang.org/grpc => google.golang.org/grpc v1.29.1 # k8s.io/client-go => k8s.io/client-go v0.19.5 +# github.com/openshift/hive/pkg/apis => ./pkg/apis
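
Taken together, the vendored types above follow the standard controller-runtime pattern: register.go wires the hiveinternal.openshift.io/v1alpha1 group into a runtime scheme, and the ClusterSync status types are then read like any other API object. A minimal sketch of a consumer, assuming a controller-runtime client built against the import paths introduced in this change; the helper name reportSyncFailures and the "mycluster" namespace/name are illustrative, not part of the patch:

package main

import (
	"context"
	"fmt"

	hiveintv1alpha1 "github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

// reportSyncFailures fetches the ClusterSync for a cluster and prints every
// SyncSet or SelectorSyncSet whose last apply attempt failed.
func reportSyncFailures(ctx context.Context, c client.Client, namespace, name string) error {
	cs := &hiveintv1alpha1.ClusterSync{}
	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, cs); err != nil {
		return err
	}
	for _, list := range [][]hiveintv1alpha1.SyncStatus{cs.Status.SyncSets, cs.Status.SelectorSyncSets} {
		for _, ss := range list {
			if ss.Result == hiveintv1alpha1.FailureSyncSetResult {
				fmt.Printf("syncset %s (generation %d): %s\n", ss.Name, ss.ObservedGeneration, ss.FailureMessage)
			}
		}
	}
	return nil
}

func main() {
	// AddToScheme comes from the register.go added in this change; it registers
	// ClusterSync, ClusterSyncLease, and their list types via the SchemeBuilder.
	scheme := runtime.NewScheme()
	if err := hiveintv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	cfg, err := config.GetConfig()
	if err != nil {
		panic(err)
	}
	c, err := client.New(cfg, client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}
	if err := reportSyncFailures(context.Background(), c, "mycluster", "mycluster"); err != nil {
		panic(err)
	}
}

The companion ClusterSyncLease carries only Spec.RenewTime, so the same client can compare that timestamp against the expected resync period to flag clusters whose syncsets have not been applied recently.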